input | output
---|---|
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './queryinst_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
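Editorial note on the pair above: the two configs differ only in how the image-loading step receives its storage options, `backend_args` in the newer MMDetection/MMEngine style versus the older `file_client_args`. A minimal sketch of the newer form, assuming a local-disk setup where `backend_args` is simply `None`:

```python
# Sketch only: None means plain local-disk reads; something like
# dict(backend='petrel') would point the loader at remote storage instead.
backend_args = None
train_pipeline = [
    dict(type='LoadImageFromFile', backend_args=backend_args),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
]
```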
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
|
import traceback
from typing import Optional
from jina.proto import jina_pb2
from jina.serve.executors import BaseExecutor
from jina.types.mixin import ProtoTypeMixin
class Request(ProtoTypeMixin):
"""
:class:`Request` is one of the primitive data types in Jina, and serves as a base for
:class:`~data.DataRequest` and :class:`~data.Response`.
It offers a Pythonic interface that allows users to access and manipulate a
:class:`jina.jina_pb2.RequestProto` object without working with Protobuf itself.
It serves as a container for a serialized :class:`jina_pb2.RequestProto` that only triggers deserialization
and decompression when it receives the first read access to one of its members.
It overrides :meth:`__getattr__` to provide the same get/set interface as a
:class:`jina_pb2.RequestProto` object.
"""
def __getattr__(self, name: str):
return getattr(self.proto, name)
def add_exception(
self, ex: Optional['Exception'] = None, executor: 'BaseExecutor' = None
) -> None:
"""Add exception to the last route in the envelope
:param ex: Exception to be added
:param executor: Executor related to the exception
"""
d = self.header.status
d.code = jina_pb2.StatusProto.ERROR
d.description = repr(ex)
if executor:
d.exception.executor = executor.__class__.__name__
d.exception.name = ex.__class__.__name__
d.exception.args.extend([str(v) for v in ex.args])
d.exception.stacks.extend(
traceback.format_exception(type(ex), value=ex, tb=ex.__traceback__)
)
|
import traceback
from typing import Optional
from jina.proto import jina_pb2
from jina.serve.executors import BaseExecutor
from jina.types.mixin import ProtoTypeMixin
class Request(ProtoTypeMixin):
"""
:class:`Request` is one of the primitive data types in Jina, and serves as a base for
:class:`~data.DataRequest` and :class:`~data.Response`.
It offers a Pythonic interface that allows users to access and manipulate a
:class:`jina.jina_pb2.RequestProto` object without working with Protobuf itself.
It serves as a container for a serialized :class:`jina_pb2.RequestProto` that only triggers deserialization
and decompression when it receives the first read access to one of its members.
It overrides :meth:`__getattr__` to provide the same get/set interface as a
:class:`jina_pb2.RequestProto` object.
"""
def __getattr__(self, name: str):
return getattr(self.proto, name)
def add_exception(
self, ex: Optional['Exception'] = None, executor: 'BaseExecutor' = None
) -> None:
"""Add exception to the last route in the envelope
:param ex: Exception to be added
:param executor: Executor related to the exception
"""
d = self.header.status
d.code = jina_pb2.StatusProto.ERROR
d.description = repr(ex)
if executor:
d.exception.executor = executor.__class__.__name__
d.exception.name = ex.__class__.__name__
d.exception.args.extend([str(v) for v in ex.args])
d.exception.stacks.extend(
traceback.format_exception(etype=type(ex), value=ex, tb=ex.__traceback__)
)
|
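Editorial note on the pair above: the two versions differ only in the `traceback.format_exception` call; the `etype=` keyword used by the older one was removed in Python 3.10. A small standard-library-only sketch of a call that works on both sides of that change:

```python
import traceback


def format_exception_portable(ex: BaseException) -> list:
    # The three-positional-argument form is accepted both before and after
    # Python 3.10, whereas passing the old `etype=` keyword raises a
    # TypeError on 3.10+.
    return traceback.format_exception(type(ex), ex, ex.__traceback__)
```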
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
default_hooks = dict(optimizer=dict(type='OptimizerHook', grad_clip=None))
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# dataset settings
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer_config = dict(_delete_=True, grad_clip=None)
lr_config = dict(warmup='linear')
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import platform
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate env variable is set",
)
def test_cpu_and_gpu_work():
# If compiled appropriately, the same installation will support both GPU and CPU.
X, y = load_breast_cancer(return_X_y=True)
data = lgb.Dataset(X, y)
params_cpu = {"verbosity": -1, "num_leaves": 31, "objective": "binary", "device": "cpu"}
cpu_bst = lgb.train(params_cpu, data, num_boost_round=10)
cpu_score = log_loss(y, cpu_bst.predict(X))
params_gpu = params_cpu.copy()
params_gpu["device"] = "gpu"
# Double-precision floats are only supported on x86_64 with PoCL
params_gpu["gpu_use_dp"] = (platform.machine() == "x86_64")
gpu_bst = lgb.train(params_gpu, data, num_boost_round=10)
gpu_score = log_loss(y, gpu_bst.predict(X))
rel = 1e-6 if params_gpu["gpu_use_dp"] else 1e-4
assert cpu_score == pytest.approx(gpu_score, rel=rel)
assert gpu_score < 0.242
|
# coding: utf-8
"""Tests for dual GPU+CPU support."""
import os
import pytest
from sklearn.metrics import log_loss
import lightgbm as lgb
from .utils import load_breast_cancer
@pytest.mark.skipif(
os.environ.get("LIGHTGBM_TEST_DUAL_CPU_GPU", None) is None,
reason="Only run if appropriate env variable is set",
)
def test_cpu_and_gpu_work():
# If compiled appropriately, the same installation will support both GPU and CPU.
X, y = load_breast_cancer(return_X_y=True)
data = lgb.Dataset(X, y)
params_cpu = {"verbosity": -1, "num_leaves": 31, "objective": "binary", "device": "cpu"}
cpu_bst = lgb.train(params_cpu, data, num_boost_round=10)
cpu_score = log_loss(y, cpu_bst.predict(X))
params_gpu = params_cpu.copy()
params_gpu["device"] = "gpu"
params_gpu["gpu_use_dp"] = True
gpu_bst = lgb.train(params_gpu, data, num_boost_round=10)
gpu_score = log_loss(y, gpu_bst.predict(X))
assert cpu_score == pytest.approx(gpu_score)
assert gpu_score < 0.242
|
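Editorial note on the pair above: one side hardcodes `gpu_use_dp=True`, the other enables double precision only on x86_64 and widens the CPU-vs-GPU tolerance when the GPU runs in single precision. The tolerance is carried by `pytest.approx(..., rel=...)`; a tiny sketch with made-up scores purely for illustration:

```python
import pytest

use_double_precision = False  # would be platform.machine() == "x86_64" in the real test
rel = 1e-6 if use_double_precision else 1e-4

cpu_score, gpu_score = 0.24010, 0.24011  # hypothetical log-loss values
assert cpu_score == pytest.approx(gpu_score, rel=rel)
```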
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; seems to have no effect, so the variable must be exported manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096; useful when running matplotlib/seaborn with
many plot generators executing in parallel, versus the Ubuntu default `ulimit -n 1024` or the OS X El Capitan default of 256.
The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; seems to have no effect, so the variable must be exported manually before starting jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096; useful when running matplotlib/seaborn with
many plot generators executing in parallel, versus the Ubuntu default `ulimit -n 1024` or the OS X El Capitan default of 256.
The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
from keras.src.api_export import keras_export
from keras.src.layers.pooling.base_pooling import BasePooling
@keras_export(["keras.layers.AveragePooling2D", "keras.layers.AvgPool2D"])
class AveragePooling2D(BasePooling):
"""Average pooling operation for 2D spatial data.
Downsamples the input along its spatial dimensions (height and width)
by taking the average value over an input window
(of size defined by `pool_size`) for each channel of the input.
The window is shifted by `strides` along each dimension.
The resulting output when using the `"valid"` padding option has a spatial
shape (number of rows or columns) of:
`output_shape = math.floor((input_shape - pool_size) / strides) + 1`
(when `input_shape >= pool_size`)
The resulting output shape when using the `"same"` padding option is:
`output_shape = math.floor((input_shape - 1) / strides) + 1`
Args:
pool_size: int or tuple of 2 integers, factors by which to downscale
(dim1, dim2). If only one integer is specified, the same
window length will be used for all dimensions.
strides: int or tuple of 2 integers, or None. Strides values. If None,
it will default to `pool_size`. If only one int is specified, the
same stride size will be used for all dimensions.
padding: string, either `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, width)`. It defaults to the
`image_data_format` value found in your Keras config file at
`~/.keras/keras.json`. If you never set it, then it will be
`"channels_last"`.
Input shape:
- If `data_format="channels_last"`:
4D tensor with shape `(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape `(batch_size, channels, height, width)`.
Output shape:
- If `data_format="channels_last"`:
4D tensor with shape
`(batch_size, pooled_height, pooled_width, channels)`.
- If `data_format="channels_first"`:
4D tensor with shape
`(batch_size, channels, pooled_height, pooled_width)`.
Examples:
`strides=(1, 1)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="valid")
>>> avg_pool_2d(x)
`strides=(2, 2)` and `padding="valid"`:
>>> x = np.array([[1., 2., 3., 4.],
... [5., 6., 7., 8.],
... [9., 10., 11., 12.]])
>>> x = np.reshape(x, [1, 3, 4, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(2, 2), padding="valid")
>>> avg_pool_2d(x)
`strides=(1, 1)` and `padding="same"`:
>>> x = np.array([[1., 2., 3.],
... [4., 5., 6.],
... [7., 8., 9.]])
>>> x = np.reshape(x, [1, 3, 3, 1])
>>> avg_pool_2d = keras.layers.AveragePooling2D(pool_size=(2, 2),
... strides=(1, 1), padding="same")
>>> avg_pool_2d(x)
"""
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
name=None,
**kwargs
):
super().__init__(
pool_size,
strides,
pool_dimensions=2,
pool_mode="average",
padding=padding,
data_format=data_format,
name=name,
**kwargs,
)
|
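Editorial note on the `AveragePooling2D` docstring above: the two output-shape formulas are easy to verify by hand. A short sketch working them through for a 3-wide input with `pool_size=2` and `strides=1`, the same numbers as the first doctest:

```python
import math

input_size, pool_size, strides = 3, 2, 1
# "valid" padding: floor((input - pool) / strides) + 1
out_valid = math.floor((input_size - pool_size) / strides) + 1  # -> 2
# "same" padding: floor((input - 1) / strides) + 1
out_same = math.floor((input_size - 1) / strides) + 1  # -> 3
print(out_valid, out_same)  # 2 3
```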
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = Video(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(Video, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(Video, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(Video, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDocument):
video: Video
video2: Video
video3: Video
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = Video(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
image = parse_obj_as(Video, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
image = parse_obj_as(Video, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_video_shortcut_doc():
class MyDoc(BaseDocument):
image: Video
image2: Video
image3: Video
doc = MyDoc(
image='http://myurl.mp4',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.mp4'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the mask is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
from __future__ import annotations
from typing import Any, Optional, Union
import PIL.Image
import torch
from ._datapoint import Datapoint
class Mask(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for segmentation and detection masks.
Args:
data (tensor-like, PIL.Image.Image): Any data that can be turned into a tensor with :func:`torch.as_tensor` as
well as PIL images.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.transforms.v2 import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return tensor.as_subclass(cls)
|
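Editorial note on the `Mask` pair above: only the docstring wording changes (the older copy still described a bounding box). As a usage illustration, a minimal sketch; the import path is an assumption, since the public location of the beta datapoints API has moved between torchvision releases:

```python
import PIL.Image
import torch

# Assumed import path (torchvision 0.15-era beta namespace); adjust for your release.
from torchvision.datapoints import Mask

mask = Mask(PIL.Image.new("L", (4, 4)))  # PIL input is routed through pil_to_tensor
assert isinstance(mask, torch.Tensor)    # still a regular tensor subclass
bool_mask = Mask(torch.zeros(1, 4, 4), dtype=torch.bool)
```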
# This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[
[
# ``RandomFlip`` must be placed before ``RandomCenterCropPad``,
# otherwise bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
],
[dict(type='LoadAnnotations', with_bbox=True)],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'flip', 'flip_direction', 'border'))
]
])
]
|
# This is different from the TTA of official CenterNet.
tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
tta_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
file_client_args=dict(backend='disk')),
dict(
type='TestTimeAug',
transforms=[
[
# ``RandomFlip`` must be placed before ``RandomCenterCropPad``,
# otherwise bounding box coordinates after flipping cannot be
# recovered correctly.
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='RandomCenterCropPad',
ratios=None,
border=None,
mean=[0, 0, 0],
std=[1, 1, 1],
to_rgb=True,
test_mode=True,
test_pad_mode=['logical_or', 31],
test_pad_add_pix=1),
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'flip', 'flip_direction', 'border'))
]
])
]
|
"""Argparser module for container runtimes"""
import argparse
from jina.enums import DockerNetworkMode
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser, pod_type: str = 'executor'):
"""Mixing in arguments required by :class:`ContainerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
:param pod_type: the pod_type configured by the parser. Can be either 'executor' for an Executor pod or 'gateway' for a Gateway pod
"""
gp = add_arg_group(parser, title='ContainerRuntime')
gp.add_argument(
'--entrypoint',
type=str,
help='The entrypoint command overrides the ENTRYPOINT in the Docker image. '
'When not set, the Docker image ENTRYPOINT takes effect.',
)
gp.add_argument(
'--docker-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of kwargs that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
''',
)
if pod_type == 'executor':
gp.add_argument(
'--volumes',
type=str,
nargs='*',
metavar='DIR',
help='''
The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
''',
)
gp.add_argument(
'--gpus',
type=str,
help=f'''
This argument allows dockerized Jina Executors to discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
''',
)
gp.add_argument(
'--disable-auto-volume',
action='store_true',
default=False,
help=f'Do not automatically mount a volume for dockerized Executors.',
)
gp.add_argument(
'--force-network-mode',
type=DockerNetworkMode.from_string,
choices=list(DockerNetworkMode),
default=DockerNetworkMode.AUTO,
help=(
f'''
Force the use of the specified docker network mode (default: auto).
Valid options are,
- {str(DockerNetworkMode.AUTO)}: Automatically detect the docker network.
- {str(DockerNetworkMode.HOST)}: Use the host network.
- {str(DockerNetworkMode.BRIDGE)}: Use a user-defined bridge network.
- {str(DockerNetworkMode.NONE)}: Use no network (equivalent to the --network=none option).
'''
if _SHOW_ALL_ARGS
else argparse.SUPPRESS
),
)
|
"""Argparser module for container runtimes"""
import argparse
from jina.enums import DockerNetworkMode
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_container_runtime_parser(parser, pod_type: str = 'executor'):
"""Mixing in arguments required by :class:`ContainerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
:param pod_type: the pod_type configured by the parser. Can be either 'executor' for an Executor pod or 'gateway' for a Gateway pod
"""
gp = add_arg_group(parser, title='ContainerRuntime')
gp.add_argument(
'--entrypoint',
type=str,
help='The entrypoint command overrides the ENTRYPOINT in the Docker image. '
'When not set, the Docker image ENTRYPOINT takes effect.',
)
gp.add_argument(
'--docker-kwargs',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of kwargs that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
''',
)
if pod_type == 'executor':
gp.add_argument(
'--volumes',
type=str,
nargs='*',
metavar='DIR',
help='''
The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split provided, then the basename of that directory will be mounted into container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
''',
)
gp.add_argument(
'--gpus',
type=str,
help=f'''
This argument allows dockerized Jina Executors to discover local gpu devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
''',
)
gp.add_argument(
'--disable-auto-volume',
action='store_true',
default=False,
help=f'Do not automatically mount a volume for dockerized Executors.',
)
gp.add_argument(
'--force-network-mode',
type=DockerNetworkMode.from_string,
choices=list(DockerNetworkMode),
default=DockerNetworkMode.AUTO,
help=f'''
Force the use of the specified docker network mode (default: auto).
Valid options are,
- {str(DockerNetworkMode.AUTO)}: Automatically detect the docker network.
- {str(DockerNetworkMode.HOST)}: Use the host network.
- {str(DockerNetworkMode.BRIDGE)}: Use a user-defined bridge network.
- {str(DockerNetworkMode.NONE)}: Use no network (equivalent to the --network=none option).
'''
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
|
from typing import TYPE_CHECKING, Any
from langchain._api.module_import import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.facebook_messenger import (
FolderFacebookMessengerChatLoader,
SingleFileFacebookMessengerChatLoader,
)
module_lookup = {
"SingleFileFacebookMessengerChatLoader": (
"langchain_community.chat_loaders.facebook_messenger"
),
"FolderFacebookMessengerChatLoader": (
"langchain_community.chat_loaders.facebook_messenger"
),
}
# Temporary code for backwards compatibility for deprecated imports.
# This will eventually be removed.
import_lookup = create_importer(
__package__,
deprecated_lookups=module_lookup,
)
def __getattr__(name: str) -> Any:
return import_lookup(name)
__all__ = ["FolderFacebookMessengerChatLoader", "SingleFileFacebookMessengerChatLoader"]
|
from typing import TYPE_CHECKING, Any
from langchain._api.module_import import create_importer
if TYPE_CHECKING:
from langchain_community.chat_loaders.facebook_messenger import (
FolderFacebookMessengerChatLoader,
SingleFileFacebookMessengerChatLoader,
)
module_lookup = {
"SingleFileFacebookMessengerChatLoader": (
"langchain_community.chat_loaders.facebook_messenger"
),
"FolderFacebookMessengerChatLoader": (
"langchain_community.chat_loaders.facebook_messenger"
),
}
# Temporary code for backwards compatibility for deprecated imports.
# This will eventually be removed.
import_lookup = create_importer(
__package__,
deprecated_lookups=module_lookup,
)
def __getattr__(name: str) -> Any:
return import_lookup(name)
__all__ = ["SingleFileFacebookMessengerChatLoader", "FolderFacebookMessengerChatLoader"]
|
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
data_preprocessor=dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False),
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
optim_wrapper = dict(optimizer=dict(weight_decay=0.00005))
|
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
_delete_=True,
type='RegNet',
arch='regnetx_3.2gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
neck=dict(
type='FPN',
in_channels=[96, 192, 432, 1008],
out_channels=256,
num_outs=5))
img_norm_cfg = dict(
# The mean and std are used in PyCls when training RegNets
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(weight_decay=0.00005)
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from __future__ import annotations
from typing import Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Wrapper")
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers.
Args:
layer: The layer to be wrapped.
"""
def __init__(self, layer, **kwargs):
try:
assert isinstance(layer, Layer)
except Exception:
raise ValueError(
f"Layer {layer} supplied to Wrapper isn't "
"a supported layer type. Please "
"ensure wrapped layer is a valid Keras layer."
)
super().__init__(**kwargs)
self.layer = layer
def build(self, input_shape=None):
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
def get_config(self):
config = {"layer": serialization_lib.serialize_keras_object(self.layer)}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
layer = serialization_lib.deserialize_keras_object(
config.pop("layer"),
custom_objects=custom_objects,
)
return cls(layer, **config)
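# A minimal sketch of a concrete wrapper (hypothetical class, not part of Keras): it
# simply delegates the forward pass to the wrapped layer, the pattern that real
# wrappers such as `TimeDistributed` and `Bidirectional` build on.
#
#   class IdentityWrapper(Wrapper):
#       def call(self, inputs):
#           return self.layer(inputs)
#
#   wrapped = IdentityWrapper(keras.layers.Dense(4))  # assumes `import keras` and `import numpy as np`
#   y = wrapped(np.ones((2, 3)))                      # builds Dense on shape (2, 3), returns shape (2, 4)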
|
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Wrapper")
class Wrapper(Layer):
"""Abstract wrapper base class.
Wrappers take another layer and augment it in various ways.
Do not use this class as a layer, it is only an abstract base class.
Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers.
Args:
layer: The layer to be wrapped.
"""
def __init__(self, layer, **kwargs):
try:
assert isinstance(layer, Layer)
except Exception:
raise ValueError(
f"Layer {layer} supplied to Wrapper isn't "
"a supported layer type. Please "
"ensure wrapped layer is a valid Keras layer."
)
super().__init__(**kwargs)
self.layer = layer
def build(self, input_shape=None):
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = True
self.built = True
def get_config(self):
config = {"layer": serialization_lib.serialize_keras_object(self.layer)}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
layer = serialization_lib.deserialize_keras_object(
config.pop("layer"),
custom_objects=custom_objects,
)
return cls(layer, **config)
|
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def _is_backend_dispatcher_enabled() -> bool:
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER", default="1") == "1"
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("list_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("set_audio_backend is a no-op when the I/O backend dispatcher is enabled.")
return
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("get_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
"""Defines utilities for switching audio backends"""
import os
import warnings
from typing import List, Optional
import torchaudio
from torchaudio._internal import module_utils as _mod_utils
from . import no_backend, soundfile_backend, sox_io_backend
__all__ = [
"list_audio_backends",
"get_audio_backend",
"set_audio_backend",
]
def _is_backend_dispatcher_enabled() -> bool:
return os.getenv("TORCHAUDIO_USE_BACKEND_DISPATCHER") == "1"
def list_audio_backends() -> List[str]:
"""List available backends
Returns:
List[str]: The list of available backends.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("list_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
backends = []
if _mod_utils.is_module_available("soundfile"):
backends.append("soundfile")
if torchaudio._extension._SOX_INITIALIZED:
backends.append("sox_io")
return backends
def set_audio_backend(backend: Optional[str]):
"""Set the backend for I/O operation
Args:
backend (str or None): Name of the backend.
One of ``"sox_io"`` or ``"soundfile"`` based on availability
of the system. If ``None`` is provided the current backend is unassigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("set_audio_backend is a no-op when the I/O backend dispatcher is enabled.")
return
if backend is not None and backend not in list_audio_backends():
raise RuntimeError(f'Backend "{backend}" is not one of ' f"available backends: {list_audio_backends()}.")
if backend is None:
module = no_backend
elif backend == "sox_io":
module = sox_io_backend
elif backend == "soundfile":
module = soundfile_backend
else:
raise NotImplementedError(f'Unexpected backend "{backend}"')
for func in ["save", "load", "info"]:
setattr(torchaudio, func, getattr(module, func))
def _init_audio_backend():
backends = list_audio_backends()
if "sox_io" in backends:
set_audio_backend("sox_io")
elif "soundfile" in backends:
set_audio_backend("soundfile")
else:
warnings.warn("No audio backend is available.")
set_audio_backend(None)
def get_audio_backend() -> Optional[str]:
"""Get the name of the current backend
Returns:
Optional[str]: The name of the current backend or ``None`` if no backend is assigned.
"""
if _is_backend_dispatcher_enabled():
warnings.warn("get_audio_backend's return value is irrelevant when the I/O backend dispatcher is enabled.")
if torchaudio.load == no_backend.load:
return None
if torchaudio.load == sox_io_backend.load:
return "sox_io"
if torchaudio.load == soundfile_backend.load:
return "soundfile"
raise ValueError("Unknown backend.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
        ``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
        raise AttributeError("SparseTripletLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
        help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and custom span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--log-config',
type=str,
default='default',
help='The config name or the absolute path to the YAML config file of the logger used in this object.',
)
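# A minimal sketch (hypothetical standalone usage; inside Jina these mixins are wired
# into the client parser by `jina.parsers`):
#
#   import argparse
#
#   parser = argparse.ArgumentParser(description='demo client parser')
#   mixin_client_protocol_parser(parser)
#   mixin_client_features_parser(parser)
#   args = parser.parse_args(['--protocol', 'http', '--asyncio'])
#   print(args.protocol, args.asyncio)  # GatewayProtocolType.HTTP True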
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
        help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and custom span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the metrics exporter agent.',
)
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
(['12345, 12344'], [12345, 12344]),
],
)
def test_pod_port_cast(port, expected_port):
args = set_pod_parser().parse_args(['--port'] + port)
assert args.port == expected_port
def test_pod_port_default():
args = set_pod_parser().parse_args([])
assert isinstance(args.port, list)
assert len(args.port) == 1
@pytest.mark.parametrize(
'host,expected_host',
[
(['localhost'], ['localhost']),
(['0.0.0.0,localhost'], ['0.0.0.0', 'localhost']),
(['0.0.0.0,localhost', '127.0.0.1'], ['0.0.0.0', 'localhost', '127.0.0.1']),
],
)
def test_pod_host_cast(host, expected_host):
args = set_pod_parser().parse_args(['--host'] + host)
assert args.host == expected_host
def test_pod_host_default():
from jina import __default_host__
args = set_pod_parser().parse_args([])
assert args.host == [__default_host__]
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert args.port is None
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
import pytest
from jina.enums import GatewayProtocolType
from jina.helper import ArgNamespace
from jina.parsers import set_gateway_parser, set_pod_parser
@pytest.mark.parametrize(
'port,expected_port',
[
('12345', [12345]),
([12345], [12345]),
([12345, 12344], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
('http', [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_kwargs(
port, protocol, expected_port, expected_protocol
):
args = ArgNamespace.kwargs2namespace(
{'port': port, 'protocol': protocol}, set_gateway_parser()
)
assert args.port == expected_port
assert args.protocol == expected_protocol
@pytest.mark.parametrize(
'port,expected_port',
[
(['12345'], [12345]),
(['12345', '12344'], [12345, 12344]),
],
)
@pytest.mark.parametrize(
'protocol,expected_protocol',
[
(['http'], [GatewayProtocolType.HTTP]),
(['GRPC'], [GatewayProtocolType.GRPC]),
(['grpc', 'http'], [GatewayProtocolType.GRPC, GatewayProtocolType.HTTP]),
],
)
def test_multiple_port_protocol_gateway_args_list(
port, protocol, expected_port, expected_protocol
):
args = set_gateway_parser().parse_args(
['--port'] + port + ['--protocol'] + protocol
)
assert args.port == expected_port
assert args.protocol == expected_protocol
def test_pod_port_cast():
args = set_pod_parser().parse_args(['--port', '12345'])
assert args.port == 12345
def test_default_port_protocol_gateway():
args = set_gateway_parser().parse_args([])
assert args.port is None
assert args.protocol == [GatewayProtocolType.GRPC]
def test_get_non_defaults_args():
args = set_gateway_parser().parse_args(
[
'--port',
'12345',
'12344',
'--protocol',
'grpc',
'--uses',
'MyCustomGateway',
'--uses-with',
'{"arg":"value"}',
]
)
non_defaults = ArgNamespace.get_non_defaults_args(
args,
set_gateway_parser(),
)
assert non_defaults['port'] == [12345, 12344]
assert 'protocol' not in non_defaults
assert non_defaults['uses'] == 'MyCustomGateway'
assert non_defaults['uses_with'] == {'arg': 'value'}
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the files make_multilingual.py and make_multilingual_sys.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
"""
This script downloads the parallel sentences corpus and creates parallel sentences tsv files that can be used to extend
existing sentence embedding models to new languages.
The parallel sentences corpus is a crawl of transcripts from talks, which are translated to 100+ languages.
The parallel sentences corpus cannot be downloaded automatically. It is available for research purposes only (CC-BY-NC).
The training procedure can be found in the files make_multilingual.py and make_multilingual_sys.py.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
import csv
from tqdm.autonotebook import tqdm
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
dev_sentences = 1000 # Number of sentences we want to use for development
download_url = "https://sbert.net/datasets/parallel-sentences.tsv.gz" # Specify parallel sentences URL here
parallel_sentences_path = "../datasets/parallel-sentences.tsv.gz" # Path of the parallel-sentences.tsv.gz file.
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(parallel_sentences_path), exist_ok=True)
if not os.path.exists(parallel_sentences_path):
print("parallel-sentences.tsv.gz does not exists. Try to download from server")
sentence_transformers.util.http_get(download_url, parallel_sentences_path)
os.makedirs(parallel_sentences_folder, exist_ok=True)
train_files = []
dev_files = []
files_to_create = []
for source_lang in source_languages:
for target_lang in target_languages:
output_filename_train = os.path.join(
parallel_sentences_folder, "talks-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
output_filename_dev = os.path.join(
parallel_sentences_folder, "talks-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
train_files.append(output_filename_train)
dev_files.append(output_filename_dev)
if not os.path.exists(output_filename_train) or not os.path.exists(output_filename_dev):
files_to_create.append(
{
"src_lang": source_lang,
"trg_lang": target_lang,
"fTrain": gzip.open(output_filename_train, "wt", encoding="utf8"),
"fDev": gzip.open(output_filename_dev, "wt", encoding="utf8"),
"devCount": 0,
}
)
if len(files_to_create) > 0:
print(
"Parallel sentences files {} do not exist. Create these files now".format(
", ".join(map(lambda x: x["src_lang"] + "-" + x["trg_lang"], files_to_create))
)
)
with gzip.open(parallel_sentences_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for line in tqdm(reader, desc="Sentences"):
for outfile in files_to_create:
src_text = line[outfile["src_lang"]].strip()
trg_text = line[outfile["trg_lang"]].strip()
if src_text != "" and trg_text != "":
if outfile["devCount"] < dev_sentences:
outfile["devCount"] += 1
fOut = outfile["fDev"]
else:
fOut = outfile["fTrain"]
fOut.write("{}\t{}\n".format(src_text, trg_text))
for outfile in files_to_create:
outfile["fTrain"].close()
outfile["fDev"].close()
print("---DONE---")
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
traversal_paths: str = 'r',
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = False,
*args,
**kwargs,
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
:param download_model: whether to download the model at start-up
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.device = device
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
script_name = 'scripts/download_full.sh'
if 'Partial' in self.model_path:
script_name = 'scripts/download_partial.sh'
subprocess.call(['sh', script_name], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=self.model_path).to(self.device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs,
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of the size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.traverse_flat(traversal_paths).batch(batch_size):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, dict(d.tags).get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
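# A minimal usage sketch (assumes the AudioCLIP checkpoint referenced by `model_path`
# is already present on disk; the array below is fake mono audio and the printed
# embedding shape is illustrative):
#
#   from jina import Document
#
#   encoder = AudioCLIPEncoder(download_model=False)
#   docs = DocumentArray(
#       [Document(blob=np.random.randn(44100).astype(np.float32),
#                 tags={'sample_rate': 44100})]
#   )
#   encoder.encode(docs=docs, parameters={})
#   print(docs[0].embedding.shape)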
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Any, Iterable, Optional
import librosa as lr
import numpy as np
import torch
from jina import DocumentArray, Executor, requests
from jina.excepts import BadDocType
from .audio_clip.model import AudioCLIP
class AudioCLIPEncoder(Executor):
"""
Encode audio data with AudioCLIP embeddings
"""
TARGET_SAMPLE_RATE = 44100 # derived from ESResNeXt
def __init__(
self,
model_path: str = 'assets/AudioCLIP-Full-Training.pt',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path of the pre-trained AudioCLIP model
:param traversal_paths: default traversal path
:param device: Torch device string (e.g. 'cpu', 'cuda', 'cuda:2')
"""
super().__init__(*args, **kwargs)
torch.set_grad_enabled(False)
self.model_path = model_path
self.traversal_paths = traversal_paths
self.batch_size = batch_size
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_model.sh'], cwd=root_path)
try:
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
except FileNotFoundError:
raise FileNotFoundError(
'Please download AudioCLIP model and set the `model_path` argument.'
)
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> Any:
"""
Encode all Documents with audio data (stored in the ``blob`` attribute) and store the
embeddings in the ``embedding`` attribute of the Documents.
        :param docs: a `DocumentArray` containing `Document`s with `blob` of the size (n,) or (2, n).
The `blob` contains audio time-series data. Additionally,
`tags` of each `Document` must contain `sample_rate` field,
which has the sample rate of the audio data. The `sample_rate` must be a positive
scalar value.
        :param parameters: dictionary that defines the `traversal_paths` and `batch_size`.
"""
if not docs:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
with torch.inference_mode():
for batch in docs.traverse_flat(traversal_paths).batch(batch_size):
self._create_embeddings(batch)
def _create_embeddings(self, filtered_docs: Iterable):
"""Update the documents with the embeddings generated by AudioCLIP"""
for d in filtered_docs:
d.blob, d.tags['sample_rate'] = self._resample(
d.blob, d.tags.get('sample_rate', None)
)
audio = torch.Tensor(d.blob).unsqueeze(0)
embedding = self.model.encode_audio(audio=audio)[0]
d.embedding = embedding.cpu().numpy()
def _resample(self, blob: np.ndarray, orig_sr: int):
if orig_sr is None:
raise BadDocType(
'sample rate is not given, please provide a valid sample rate'
)
if orig_sr == AudioCLIPEncoder.TARGET_SAMPLE_RATE:
return blob, orig_sr
return (
lr.resample(blob, orig_sr, AudioCLIPEncoder.TARGET_SAMPLE_RATE),
AudioCLIPEncoder.TARGET_SAMPLE_RATE,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.config import ConfigDict
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig, OptConfigType
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
seg_scale_factor=1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
ignore_label: int = None,
loss_weight: float = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_pred (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
def loss(self, mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_pred (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Tuple
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import ConfigDict, MultiConfig, OptConfigType
from mmdet.registry import MODELS
@MODELS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(
self,
num_ins: int,
fusion_level: int,
seg_scale_factor=1 / 8,
num_convs: int = 4,
in_channels: int = 256,
conv_out_channels: int = 256,
num_classes: int = 183,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
ignore_label: int = None,
loss_weight: float = None,
loss_seg: ConfigDict = dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.2),
init_cfg: MultiConfig = dict(
type='Kaiming', override=dict(name='conv_logits'))
) -> None:
super().__init__(init_cfg=init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.seg_scale_factor = seg_scale_factor
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = MODELS.build(loss_seg)
def forward(self, feats: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function.
Args:
feats (tuple[Tensor]): Multi scale feature maps.
Returns:
tuple[Tensor]:
- mask_pred (Tensor): Predicted mask logits.
- x (Tensor): Fused feature.
"""
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
def loss(self, mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Loss function.
Args:
mask_pred (Tensor): Predicted mask logits.
labels (Tensor): Ground truth.
Returns:
Tensor: Semantic segmentation loss.
"""
labels = F.interpolate(
labels.float(), scale_factor=self.seg_scale_factor, mode='nearest')
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
# delete the root document
self._del_docs_by_ids([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
self._annlite.delete([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> keras.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, "pre" or "post" (optional, defaults to `"pre"`):
pad either before or after each sequence.
truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value. (Optional, defaults to `0.`)
Returns:
NumPy array with shape `(len(sequences), maxlen)`
"""
if not hasattr(sequences, "__len__"):
raise ValueError("`sequences` must be iterable.")
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
    is_dtype_str = np.issubdtype(dtype, np.str_)
if isinstance(value, str) and dtype is not object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
|
import numpy as np
from keras.src.api_export import keras_export
@keras_export(
[
"keras.utils.pad_sequences",
"keras.preprocessing.sequence.pad_sequences",
]
)
def pad_sequences(
sequences,
maxlen=None,
dtype="int32",
padding="pre",
truncating="pre",
value=0.0,
):
"""Pads sequences to the same length.
This function transforms a list (of length `num_samples`)
of sequences (lists of integers)
into a 2D NumPy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence in the list.
Sequences that are shorter than `num_timesteps`
are padded with `value` until they are `num_timesteps` long.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding or removing values from the beginning of the sequence is the
default.
>>> sequence = [[1], [2, 3], [4, 5, 6]]
>>> keras.utils.pad_sequences(sequence)
array([[0, 0, 1],
[0, 2, 3],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, value=-1)
array([[-1, -1, 1],
[-1, 2, 3],
[ 4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, padding='post')
array([[1, 0, 0],
[2, 3, 0],
[4, 5, 6]], dtype=int32)
>>> keras.utils.pad_sequences(sequence, maxlen=2)
array([[0, 1],
[2, 3],
[5, 6]], dtype=int32)
Args:
sequences: List of sequences (each sequence is a list of integers).
maxlen: Optional Int, maximum length of all sequences. If not provided,
sequences will be padded to the length of the longest individual
sequence.
dtype: (Optional, defaults to `"int32"`). Type of the output sequences.
To pad sequences with variable length strings, you can use `object`.
padding: String, "pre" or "post" (optional, defaults to `"pre"`):
pad either before or after each sequence.
truncating: String, "pre" or "post" (optional, defaults to `"pre"`):
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float or String, padding value. (Optional, defaults to `0.`)
Returns:
NumPy array with shape `(len(sequences), maxlen)`
"""
if not hasattr(sequences, "__len__"):
raise ValueError("`sequences` must be iterable.")
num_samples = len(sequences)
lengths = []
sample_shape = ()
flag = True
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
for x in sequences:
try:
lengths.append(len(x))
if flag and len(x):
sample_shape = np.asarray(x).shape[1:]
flag = False
except TypeError as e:
raise ValueError(
"`sequences` must be a list of iterables. "
f"Found non-iterable: {str(x)}"
) from e
if maxlen is None:
maxlen = np.max(lengths)
    is_dtype_str = np.issubdtype(dtype, np.str_)
if isinstance(value, str) and dtype != object and not is_dtype_str:
raise ValueError(
f"`dtype` {dtype} is not compatible with `value`'s type: "
f"{type(value)}\nYou should set `dtype=object` for variable length "
"strings."
)
x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == "pre":
trunc = s[-maxlen:]
elif truncating == "post":
trunc = s[:maxlen]
else:
raise ValueError(f'Truncating type "{truncating}" not understood')
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
f"Shape of sample {trunc.shape[1:]} of sequence at "
f"position {idx} is different from expected shape "
f"{sample_shape}"
)
if padding == "post":
x[idx, : len(trunc)] = trunc
elif padding == "pre":
x[idx, -len(trunc) :] = trunc
else:
raise ValueError(f'Padding type "{padding}" not understood')
return x
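# Illustrative sketch (not part of the original module): the docstring examples
# above show padding and the default pre-truncation; this also exercises
# `truncating="post"`, which keeps the head of an over-long sequence.
if __name__ == "__main__":
    seq = [[1, 2, 3, 4, 5]]
    assert pad_sequences(seq, maxlen=2).tolist() == [[4, 5]]  # 'pre' keeps the tail
    assert pad_sequences(seq, maxlen=2, truncating="post").tolist() == [[1, 2]]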
|
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import abcdefg # no install and unlist
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import ngt # list but no install
fake_tags = ['ngt', 'index', 'py37']
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt.abc.edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
from ngt.abc import edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
import abcdefg
assert not ie._tags
def test_no_suppress_other_exception():
with pytest.raises(Exception):
with ImportExtensions(required=False, logger=default_logger):
raise Exception
with pytest.raises(Exception):
with ImportExtensions(required=True, logger=default_logger):
raise Exception
def test_path_importer(tmpdir):
    tmpmodule = 'package.py'
with open(tmpdir / tmpmodule, 'w') as f:
f.write("raise ImportError")
from jina.importer import PathImporter
with pytest.raises(ImportError):
PathImporter.add_modules(tmpdir / tmpmodule)
with pytest.raises(FileNotFoundError):
PathImporter.add_modules('some_package.py')
|
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
from jina.logging.predefined import default_logger
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import abcdefg # no install and unlist
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import ngt # list but no install
fake_tags = ['ngt', 'index', 'py37']
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt.abc.edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
from ngt.abc import edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
import abcdefg
assert not ie._tags
def test_no_suppress_other_exception():
with pytest.raises(Exception):
with ImportExtensions(required=False, logger=default_logger):
raise Exception
with pytest.raises(Exception):
with ImportExtensions(required=True, logger=default_logger):
raise Exception
|
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
ServiceDefinition,
inspect_signature,
is_free_function,
validate_step_signature,
)
from .resource import ResourceDefinition
if TYPE_CHECKING: # pragma: no cover
from .workflow import Workflow
from .retry_policy import RetryPolicy
class StepConfig(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
accepted_events: List[Any]
event_name: str
return_types: List[Any]
context_parameter: Optional[str]
num_workers: int
requested_services: List[ServiceDefinition]
retry_policy: Optional[RetryPolicy]
resources: List[ResourceDefinition]
def step(
*args: Any,
workflow: Optional[Type["Workflow"]] = None,
pass_context: bool = False,
num_workers: int = 4,
retry_policy: Optional[RetryPolicy] = None,
) -> Callable:
"""
Decorator used to mark methods and functions as workflow steps.
Decorators are evaluated at import time, but we need to wait for
starting the communication channels until runtime. For this reason,
we temporarily store the list of events that will be consumed by this
step in the function object itself.
Args:
workflow: Workflow class to which the decorated step will be added. Only needed when using the
decorator on free functions instead of class methods.
num_workers: The number of workers that will process events for the decorated step. The default
            value works most of the time.
retry_policy: The policy used to retry a step that encountered an error while running.
"""
def decorator(func: Callable) -> Callable:
if not isinstance(num_workers, int) or num_workers <= 0:
raise WorkflowValidationError(
"num_workers must be an integer greater than 0"
)
        # This will raise, providing a message describing the specific validation failure
spec = inspect_signature(func)
validate_step_signature(spec)
event_name, accepted_events = next(iter(spec.accepted_events.items()))
# store the configuration in the function object
func.__step_config = StepConfig( # type: ignore[attr-defined]
accepted_events=accepted_events,
event_name=event_name,
return_types=spec.return_types,
context_parameter=spec.context_parameter,
num_workers=num_workers,
requested_services=spec.requested_services or [],
retry_policy=retry_policy,
resources=spec.resources,
)
# If this is a free function, call add_step() explicitly.
if is_free_function(func.__qualname__):
if workflow is None:
msg = f"To decorate {func.__name__} please pass a workflow class to the @step decorator."
raise WorkflowValidationError(msg)
workflow.add_step(func)
return func
if len(args):
# The decorator was used without parentheses, like `@step`
func = args[0]
decorator(func)
return func
return decorator
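# A minimal sketch (placeholder names, not part of this module) of the
# free-function form described in the docstring above: the target workflow
# class is passed explicitly because the step is not defined as a method.
#
# @step(workflow=EchoFlow)
# async def echo(ev: StartEvent) -> StopEvent:
#     return StopEvent(result="done")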
|
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
from llama_index.core.bridge.pydantic import BaseModel, ConfigDict
from .errors import WorkflowValidationError
from .utils import (
is_free_function,
validate_step_signature,
inspect_signature,
ServiceDefinition,
)
if TYPE_CHECKING: # pragma: no cover
from .workflow import Workflow
from .retry_policy import RetryPolicy
class StepConfig(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
accepted_events: List[Any]
event_name: str
return_types: List[Any]
context_parameter: Optional[str]
num_workers: int
requested_services: List[ServiceDefinition]
retry_policy: Optional[RetryPolicy]
def step(
*args: Any,
workflow: Optional[Type["Workflow"]] = None,
pass_context: bool = False,
num_workers: int = 4,
retry_policy: Optional[RetryPolicy] = None,
) -> Callable:
"""
Decorator used to mark methods and functions as workflow steps.
Decorators are evaluated at import time, but we need to wait for
starting the communication channels until runtime. For this reason,
we temporarily store the list of events that will be consumed by this
step in the function object itself.
Args:
workflow: Workflow class to which the decorated step will be added. Only needed when using the
decorator on free functions instead of class methods.
num_workers: The number of workers that will process events for the decorated step. The default
            value works most of the time.
retry_policy: The policy used to retry a step that encountered an error while running.
"""
def decorator(func: Callable) -> Callable:
if not isinstance(num_workers, int) or num_workers <= 0:
raise WorkflowValidationError(
"num_workers must be an integer greater than 0"
)
        # This will raise, providing a message describing the specific validation failure
spec = inspect_signature(func)
validate_step_signature(spec)
event_name, accepted_events = next(iter(spec.accepted_events.items()))
# store the configuration in the function object
func.__step_config = StepConfig( # type: ignore[attr-defined]
accepted_events=accepted_events,
event_name=event_name,
return_types=spec.return_types,
context_parameter=spec.context_parameter,
num_workers=num_workers,
requested_services=spec.requested_services or [],
retry_policy=retry_policy,
)
# If this is a free function, call add_step() explicitly.
if is_free_function(func.__qualname__):
if workflow is None:
msg = f"To decorate {func.__name__} please pass a workflow class to the @step decorator."
raise WorkflowValidationError(msg)
workflow.add_step(func)
return func
if len(args):
# The decorator was used without parentheses, like `@step`
func = args[0]
decorator(func)
return func
return decorator
|
from typing import Any, List, Optional
from mcp.client.session import ClientSession
from mcp.server.fastmcp import FastMCP, Context
from pydantic import BaseModel
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
def get_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client, allowed_tools=allowed_tools, include_resources=include_resources
)
return tool_spec.to_tool_list()
async def aget_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client, allowed_tools=allowed_tools, include_resources=include_resources
)
return await tool_spec.to_tool_list_async()
def workflow_as_mcp(
workflow: Workflow,
workflow_name: Optional[str] = None,
workflow_description: Optional[str] = None,
start_event_model: Optional[BaseModel] = None,
**fastmcp_init_kwargs: Any,
) -> FastMCP:
"""
Convert a workflow to an MCP app.
This will convert any `Workflow` to an MCP app. It will expose the workflow as a tool
    within MCP, which will run the workflow when the tool is called and stream
    its intermediate events back as MCP log messages.
Args:
workflow:
The workflow to convert.
workflow_name (optional):
The name of the workflow. Defaults to the workflow class name.
workflow_description (optional):
The description of the workflow. Defaults to the workflow docstring.
start_event_model (optional):
The start event model of the workflow. Can be a `BaseModel` or a `StartEvent` class.
Defaults to the workflow's custom `StartEvent` class.
**fastmcp_init_kwargs:
Additional keyword arguments to pass to the FastMCP constructor.
Returns:
The MCP app object.
"""
app = FastMCP(**fastmcp_init_kwargs)
# Dynamically get the start event class -- this is a bit of a hack
StartEventCLS = start_event_model or workflow._start_event_class
if StartEventCLS == StartEvent:
raise ValueError(
"Must declare a custom StartEvent class in your workflow or provide a start_event_model."
)
# Get the workflow name and description
workflow_name = workflow_name or workflow.__class__.__name__
workflow_description = workflow_description or workflow.__doc__
@app.tool(name=workflow_name, description=workflow_description)
async def _workflow_tool(run_args: StartEventCLS, context: Context) -> Any:
# Handle edge cases where the start event is an Event or a BaseModel
# If the workflow does not have a custom StartEvent class, then we need to handle the event differently
if isinstance(run_args, Event) and workflow._start_event_class != StartEvent:
handler = workflow.run(start_event=run_args)
elif isinstance(run_args, BaseModel):
handler = workflow.run(**run_args.model_dump())
elif isinstance(run_args, dict):
start_event = StartEventCLS.model_validate(run_args)
handler = workflow.run(start_event=start_event)
else:
raise ValueError(f"Invalid start event type: {type(run_args)}")
async for event in handler.stream_events():
if not isinstance(event, StopEvent):
await context.log("info", message=event.model_dump_json())
return await handler
return app
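# A brief usage sketch (placeholder names; the workflow must declare its own
# StartEvent subclass, as enforced above):
#
# echo_app = workflow_as_mcp(EchoFlow(timeout=60), workflow_name="echo")
# echo_app.run()  # serve the workflow as an MCP tool (stdio transport by default)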
|
from typing import Any, List, Optional
from mcp.client.session import ClientSession
from mcp.server.fastmcp import FastMCP, Context
from pydantic import BaseModel
from llama_index.core.tools import FunctionTool
from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow
from llama_index.tools.mcp.base import McpToolSpec
from llama_index.tools.mcp.client import BasicMCPClient
def get_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client, allowed_tools=allowed_tools, include_resources=include_resources
)
return tool_spec.to_tool_list()
async def aget_tools_from_mcp_url(
command_or_url: str,
client: Optional[ClientSession] = None,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> List[FunctionTool]:
"""
Get tools from an MCP server or command.
Args:
command_or_url: The command to run or the URL to connect to.
client (optional): The client to use to connect to the MCP server.
allowed_tools (optional): The tool names to allow from the MCP server.
include_resources (optional): Whether to include resources in the tool list.
"""
client = client or BasicMCPClient(command_or_url)
tool_spec = McpToolSpec(
client, allowed_tools=allowed_tools, include_resources=include_resources
)
return await tool_spec.to_tool_list_async()
def workflow_as_mcp(
workflow: Workflow,
workflow_name: Optional[str] = None,
workflow_description: Optional[str] = None,
start_event_model: Optional[BaseModel] = None,
**fastmcp_init_kwargs: Any,
) -> FastMCP:
"""
Convert a workflow to an MCP app.
This will convert any `Workflow` to an MCP app. It will expose the workflow as a tool
    within MCP, which will run the workflow when the tool is called and stream
    its intermediate events back as MCP log messages.
Args:
workflow:
The workflow to convert.
workflow_name (optional):
The name of the workflow. Defaults to the workflow class name.
workflow_description (optional):
The description of the workflow. Defaults to the workflow docstring.
start_event_model (optional):
The start event model of the workflow. Can be a `BaseModel` or a `StartEvent` class.
Defaults to the workflow's custom `StartEvent` class.
**fastmcp_init_kwargs:
Additional keyword arguments to pass to the FastMCP constructor.
Returns:
The MCP app object.
"""
app = FastMCP(**fastmcp_init_kwargs)
# Dynamically get the start event class -- this is a bit of a hack
StartEventCLS = start_event_model or workflow._start_event_class
if StartEventCLS == StartEvent:
raise ValueError(
"Must declare a custom StartEvent class in your workflow or provide a start_event_model."
)
# Get the workflow name and description
workflow_name = workflow_name or workflow.__class__.__name__
workflow_description = workflow_description or workflow.__doc__
@app.tool(name=workflow_name, description=workflow_description)
async def _workflow_tool(run_args: StartEventCLS, context: Context) -> Any:
# Handle edge cases where the start event is an Event or a BaseModel
# If the workflow does not have a custom StartEvent class, then we need to handle the event differently
context.session
if isinstance(run_args, Event) and workflow._start_event_class != StartEvent:
handler = workflow.run(start_event=run_args)
elif isinstance(run_args, BaseModel):
handler = workflow.run(**run_args.model_dump())
else:
raise ValueError(f"Invalid start event type: {type(run_args)}")
async for event in handler.stream_events():
if not isinstance(event, StopEvent):
await context.log("info", message=event.model_dump_json())
return await handler
return app
|
"""Test Petals API wrapper."""
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.petals import Petals
def test_api_key_is_string() -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type]
assert isinstance(llm.huggingface_api_key, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type]
print(llm.huggingface_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_petals_call() -> None:
    """Test valid call to Petals."""
llm = Petals(max_new_tokens=10)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
"""Test Petals API wrapper."""
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.llms.petals import Petals
def test_api_key_is_string() -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
assert isinstance(llm.huggingface_api_key, SecretStr)
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = Petals(huggingface_api_key="secret-api-key") # type: ignore[arg-type, call-arg]
print(llm.huggingface_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
def test_petals_call() -> None:
    """Test valid call to Petals."""
llm = Petals(max_new_tokens=10) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
__all__ = ["RETURN_VAL_TYPE", "BaseCache"]
|
from langchain_core.caches import RETURN_VAL_TYPE, BaseCache
__all__ = ["BaseCache", "RETURN_VAL_TYPE"]
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Optional[Dict] = None
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
documents.append(Document(text=response, id_=url, metadata=metadata or {}))
return documents
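# Illustrative usage sketch (the URL is a placeholder):
#
# reader = SimpleWebPageReader(html_to_text=True)
# documents = reader.load_data(["https://example.com"])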
|
"""Simple Web scraper."""
from typing import List, Optional, Dict, Callable
import requests
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class SimpleWebPageReader(BasePydanticReader):
"""
Simple web page reader.
Reads pages from the web.
Args:
html_to_text (bool): Whether to convert HTML to text.
Requires `html2text` package.
metadata_fn (Optional[Callable[[str], Dict]]): A function that takes in
a URL and returns a dictionary of metadata.
Default is None.
"""
is_remote: bool = True
html_to_text: bool
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
def __init__(
self,
html_to_text: bool = False,
metadata_fn: Optional[Callable[[str], Dict]] = None,
) -> None:
"""Initialize with parameters."""
try:
import html2text # noqa
except ImportError:
raise ImportError(
"`html2text` package not found, please run `pip install html2text`"
)
super().__init__(html_to_text=html_to_text)
self._metadata_fn = metadata_fn
@classmethod
def class_name(cls) -> str:
return "SimpleWebPageReader"
def load_data(self, urls: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
urls (List[str]): List of URLs to scrape.
Returns:
List[Document]: List of documents.
"""
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
for url in urls:
response = requests.get(url, headers=None).text
if self.html_to_text:
import html2text
response = html2text.html2text(response)
metadata: Optional[Dict] = None
if self._metadata_fn is not None:
metadata = self._metadata_fn(url)
documents.append(Document(text=response, id_=url, metadata=metadata or {}))
return documents
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBoxes, ToDtype
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertDtype, ConvertImageDtype
from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBoxes, ToDtype
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
@pytest.mark.parametrize('traversal_paths', ['@r', '@c'])
def test_executor(traversal_paths):
ex = Sentencizer(traversal_paths=traversal_paths)
doc = Document(text='Hello. World! Go? Back')
if 'c' in traversal_paths:
da = DocumentArray([Document(chunks=[doc])])
else:
da = DocumentArray([doc])
ex.segment(da, {})
flattened_docs = da[traversal_paths]
assert len(flattened_docs) == 1
assert len(flattened_docs[0].chunks) == 4
assert flattened_docs[0].chunks[0].text == 'Hello.'
assert flattened_docs[0].chunks[1].text == 'World!'
assert flattened_docs[0].chunks[2].text == 'Go?'
assert flattened_docs[0].chunks[3].text == 'Back'
def test_executor_with_punct_chars():
ex = Sentencizer(punct_chars=['.'])
da = DocumentArray([Document(text='Hello. World! Go? Back')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hello.'
assert da[0].chunks[1].text == 'World! Go? Back'
def test_executor_with_max_sent_length():
ex = Sentencizer(punct_chars=['.'], max_sent_len=3)
da = DocumentArray([Document(text='Hello. World')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hel'
assert da[0].chunks[1].text == 'Wor'
def test_executor_empty_input():
ex = Sentencizer()
da = DocumentArray()
ex.segment(da, {})
assert len(da) == 0
def test_executor_none_input():
ex = Sentencizer()
ex.segment(None, {})
|
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
from sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
@pytest.mark.parametrize('traversal_paths', [('r',), ('c',)])
def test_executor(traversal_paths):
ex = Sentencizer(traversal_paths=traversal_paths)
doc = Document(text='Hello. World! Go? Back')
if 'c' in traversal_paths:
da = DocumentArray([Document(chunks=[doc])])
else:
da = DocumentArray([doc])
ex.segment(da, {})
flattened_docs = da.traverse_flat(traversal_paths)
assert len(flattened_docs) == 1
assert len(flattened_docs[0].chunks) == 4
assert flattened_docs[0].chunks[0].text == 'Hello.'
assert flattened_docs[0].chunks[1].text == 'World!'
assert flattened_docs[0].chunks[2].text == 'Go?'
assert flattened_docs[0].chunks[3].text == 'Back'
def test_executor_with_punct_chars():
ex = Sentencizer(punct_chars=['.'])
da = DocumentArray([Document(text='Hello. World! Go? Back')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hello.'
assert da[0].chunks[1].text == 'World! Go? Back'
def test_executor_with_max_sent_length():
ex = Sentencizer(punct_chars=['.'], max_sent_len=3)
da = DocumentArray([Document(text='Hello. World')])
ex.segment(da, {})
assert len(da) == 1
assert len(da[0].chunks) == 2
assert da[0].chunks[0].text == 'Hel'
assert da[0].chunks[1].text == 'Wor'
def test_executor_empty_input():
ex = Sentencizer()
da = DocumentArray()
ex.segment(da, {})
assert len(da) == 0
def test_executor_none_input():
ex = Sentencizer()
ex.segment(None, {})
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_torch_npu_optimizers() -> List[str]:
"""Register optimizers in ``torch npu`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
if not is_npu_available():
return []
import torch_npu
if not hasattr(torch_npu, 'optim'):
return []
torch_npu_optimizers = []
for module_name in dir(torch_npu.optim):
if module_name.startswith('__') or module_name in OPTIMIZERS:
continue
_optim = getattr(torch_npu.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_npu_optimizers.append(module_name)
return torch_npu_optimizers
NPU_OPTIMIZERS = register_torch_npu_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def register_lion_optimizers() -> List[str]:
"""Register Lion optimizer to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
optimizers = []
try:
from lion_pytorch import Lion
except ImportError:
pass
else:
OPTIMIZERS.register_module(module=Lion)
optimizers.append('Lion')
return optimizers
LION_OPTIMIZERS = register_lion_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # The current generation of NPU (Ascend 910) only supports mixed precision
    # training, so we turn on mixed precision by default on the NPU so that
    # training runs correctly.
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
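# Hypothetical usage sketch (the model and hyper-parameters are placeholders):
#
# optim_wrapper = build_optim_wrapper(
#     model,
#     dict(type='OptimWrapper',
#          optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4)))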
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def register_lion_optimizers() -> List[str]:
"""Register Lion optimizer to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
optimizers = []
try:
from lion_pytorch import Lion
except ImportError:
pass
else:
OPTIMIZERS.register_module(module=Lion)
optimizers.append('Lion')
return optimizers
LION_OPTIMIZERS = register_lion_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # The current generation of NPU (Ascend 910) only supports mixed precision
    # training, so we turn on mixed precision by default on the NPU so that
    # training runs correctly.
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
import logging
import os
import zlib
from contextlib import asynccontextmanager
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
def add_param(url: str, key: str, value: str) -> str:
p = urlparse(url)
qs = dict(parse_qsl(p.query))
qs[key] = value
return urlunparse(p._replace(query=urlencode(qs)))
DATABASE_URL = os.getenv("DATABASE_URL")
if not DATABASE_URL:
raise ValueError("DATABASE_URL is not set.")
CONN_LIMIT = os.getenv("DB_CONNECTION_LIMIT")
if CONN_LIMIT:
DATABASE_URL = add_param(DATABASE_URL, "connection_limit", CONN_LIMIT)
CONN_TIMEOUT = os.getenv("DB_CONNECT_TIMEOUT")
if CONN_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "connect_timeout", CONN_TIMEOUT)
POOL_TIMEOUT = os.getenv("DB_POOL_TIMEOUT")
if POOL_TIMEOUT:
DATABASE_URL = add_param(DATABASE_URL, "pool_timeout", POOL_TIMEOUT)
HTTP_TIMEOUT = int(POOL_TIMEOUT) if POOL_TIMEOUT else None
prisma = Prisma(
auto_register=True,
http={"timeout": HTTP_TIMEOUT},
datasource={"url": DATABASE_URL},
)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
if not prisma.is_connected():
raise ConnectionError("Failed to connect to Prisma.")
    # A connection acquired through a pooler (e.g. Supabase) may still let the
    # client connect while rejecting queries afterwards, so verify with a trivial query.
try:
await prisma.execute_raw("SELECT 1")
except Exception as e:
raise ConnectionError("Failed to connect to Prisma.") from e
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
if prisma.is_connected():
raise ConnectionError("Failed to disconnect from Prisma.")
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
@asynccontextmanager
async def locked_transaction(key: str):
lock_key = zlib.crc32(key.encode("utf-8"))
async with transaction() as tx:
await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})")
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
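# Illustrative sketch (table and key are placeholders): serialize concurrent
# writers on a shared resource with a transaction-scoped Postgres advisory lock.
#
# async def bump_counter():
#     async with locked_transaction("counter:global") as tx:
#         await tx.execute_raw('UPDATE "Counter" SET value = value + 1')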
|
import logging
import os
import zlib
from contextlib import asynccontextmanager
from uuid import uuid4
from dotenv import load_dotenv
from prisma import Prisma
from pydantic import BaseModel, Field, field_validator
from backend.util.retry import conn_retry
load_dotenv()
PRISMA_SCHEMA = os.getenv("PRISMA_SCHEMA", "schema.prisma")
os.environ["PRISMA_SCHEMA_PATH"] = PRISMA_SCHEMA
prisma = Prisma(auto_register=True)
logger = logging.getLogger(__name__)
@conn_retry("Prisma", "Acquiring connection")
async def connect():
if prisma.is_connected():
return
await prisma.connect()
if not prisma.is_connected():
raise ConnectionError("Failed to connect to Prisma.")
    # A connection acquired through a pooler (e.g. Supabase) may still let the
    # client connect while rejecting queries afterwards, so verify with a trivial query.
try:
await prisma.execute_raw("SELECT 1")
except Exception as e:
raise ConnectionError("Failed to connect to Prisma.") from e
@conn_retry("Prisma", "Releasing connection")
async def disconnect():
if not prisma.is_connected():
return
await prisma.disconnect()
if prisma.is_connected():
raise ConnectionError("Failed to disconnect from Prisma.")
@asynccontextmanager
async def transaction():
async with prisma.tx() as tx:
yield tx
@asynccontextmanager
async def locked_transaction(key: str):
lock_key = zlib.crc32(key.encode("utf-8"))
async with transaction() as tx:
await tx.execute_raw(f"SELECT pg_advisory_xact_lock({lock_key})")
yield tx
class BaseDbModel(BaseModel):
id: str = Field(default_factory=lambda: str(uuid4()))
@field_validator("id", mode="before")
def set_model_id(cls, id: str) -> str:
# In case an empty ID is submitted
return id or str(uuid4())
|
import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileMoveInput(BaseModel):
"""Input for MoveFileTool."""
source_path: str = Field(..., description="Path of the file to move")
destination_path: str = Field(..., description="New path for the moved file")
class MoveFileTool(BaseFileToolMixin, BaseTool):
"""Tool that moves a file."""
name: str = "move_file"
args_schema: Type[BaseModel] = FileMoveInput
description: str = "Move or rename a file from one location to another"
def _run(
self,
source_path: str,
destination_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
)
if not source_path_.exists():
return f"Error: no such file or directory {source_path}"
try:
# shutil.move expects str args in 3.8
shutil.move(str(source_path_), destination_path_)
return f"File moved successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
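# Illustrative usage sketch (paths are placeholders; `root_dir` comes from
# BaseFileToolMixin and confines file operations to that directory):
#
# tool = MoveFileTool(root_dir="/tmp/workspace")
# print(tool.run({"source_path": "a.txt", "destination_path": "b.txt"}))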
|
import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileMoveInput(BaseModel):
"""Input for MoveFileTool."""
source_path: str = Field(..., description="Path of the file to move")
destination_path: str = Field(..., description="New path for the moved file")
class MoveFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override]
"""Tool that moves a file."""
name: str = "move_file"
args_schema: Type[BaseModel] = FileMoveInput
description: str = "Move or rename a file from one location to another"
def _run(
self,
source_path: str,
destination_path: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
source_path_ = self.get_relative_path(source_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
arg_name="source_path", value=source_path
)
try:
destination_path_ = self.get_relative_path(destination_path)
except FileValidationError:
return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
)
if not source_path_.exists():
return f"Error: no such file or directory {source_path}"
try:
# shutil.move expects str args in 3.8
shutil.move(str(source_path_), destination_path_)
return f"File moved successfully from {source_path} to {destination_path}."
except Exception as e:
return "Error: " + str(e)
# TODO: Add aiofiles method
|
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
# noqa since we don't need a timeout here as the request should fail immediately
requests.get("https://www.example.com") # noqa: S113
|
import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
requests.get("https://www.example.com")
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe3\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 586
_DOCUMENTPROTO._serialized_start = 589
_DOCUMENTPROTO._serialized_end = 719
_DOCUMENTPROTO_DATAENTRY._serialized_start = 655
_DOCUMENTPROTO_DATAENTRY._serialized_end = 719
_DOCUMENTARRAYPROTO._serialized_start = 721
_DOCUMENTARRAYPROTO._serialized_end = 780
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xe2\x02\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12(\n\x06tensor\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProtob\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 585
_DOCUMENTPROTO._serialized_start = 588
_DOCUMENTPROTO._serialized_end = 718
_DOCUMENTPROTO_DATAENTRY._serialized_start = 654
_DOCUMENTPROTO_DATAENTRY._serialized_end = 718
_DOCUMENTARRAYPROTO._serialized_start = 720
_DOCUMENTARRAYPROTO._serialized_end = 779
# @@protoc_insertion_point(module_scope)
|
import os
import time
import pytest
from prometheus_api_client import PrometheusConnect
from jina.helper import random_port
@pytest.fixture()
def jaeger_port():
port = random_port()
os.environ['JAEGER_PORT'] = str(port)
yield port
del os.environ['JAEGER_PORT']
@pytest.fixture()
def prometheus_backend_port():
port = random_port()
os.environ['PROMETHEUS_BACKEND_PORT'] = str(port)
yield port
del os.environ['PROMETHEUS_BACKEND_PORT']
@pytest.fixture()
def otlp_receiver_port():
port = random_port()
os.environ['OTLP_RECEIVER_PORT'] = str(port)
yield port
del os.environ['OTLP_RECEIVER_PORT']
@pytest.fixture()
def otlp_collector(jaeger_port, prometheus_backend_port, otlp_receiver_port):
file_dir = os.path.dirname(__file__)
os.system(
f"docker compose -f {os.path.join(file_dir, 'docker-compose.yml')} up -d --remove-orphans"
)
time.sleep(1)
yield
os.system(
f"docker compose -f {os.path.join(file_dir, 'docker-compose.yml')} down --remove-orphans"
)
@pytest.fixture()
def prometheus_client(prometheus_backend_port, otlp_collector):
return PrometheusConnect(
url=f'http://localhost:{prometheus_backend_port}', disable_ssl=True
)
@pytest.fixture()
def set_metrics_export_interval():
os.environ['OTEL_METRIC_EXPORT_INTERVAL'] = '200' # milliseconds
yield
del os.environ['OTEL_METRIC_EXPORT_INTERVAL']
@pytest.fixture()
def instrumented_services_sharded():
return [
'gateway/rep-0',
'executor0/shard-0/rep-0',
'executor0/shard-1/rep-0',
'executor0/head',
]
@pytest.fixture()
def expected_flow_metric_labels():
return [
'jina_number_of_pending_requests',
'jina_received_request_bytes_bucket',
'jina_received_request_bytes_count',
'jina_received_request_bytes_sum',
'jina_received_response_bytes_bucket',
'jina_received_response_bytes_count',
'jina_received_response_bytes_sum',
'jina_receiving_request_seconds_bucket',
'jina_receiving_request_seconds_count',
'jina_receiving_request_seconds_sum',
'jina_sent_request_bytes_bucket',
'jina_sent_request_bytes_count',
'jina_sent_request_bytes_sum',
'jina_sent_response_bytes_bucket',
'jina_sent_response_bytes_count',
'jina_sent_response_bytes_sum',
'jina_successful_requests',
'jina_failed_requests',
'jina_document_processed',
'jina_receiving_request_seconds_bucket',
'jina_receiving_request_seconds_count',
'jina_receiving_request_seconds_sum',
'jina_sending_request_seconds_bucket',
'jina_sending_request_seconds_count',
'jina_sending_request_seconds_sum',
'jina_process_request_seconds_bucket',
'jina_process_request_seconds_count',
'jina_process_request_seconds_sum',
'executor',
'executor_endpoint',
'runtime_name',
'target_info', # otel or prometheus related
'request_counter', # otel or prometheus related
'up', # otel or prometheus related
]
|
import os
import time
import pytest
from prometheus_api_client import PrometheusConnect
from jina.helper import random_port
@pytest.fixture()
def jaeger_port():
port = random_port()
os.environ['JAEGER_PORT'] = str(port)
yield port
del os.environ['JAEGER_PORT']
@pytest.fixture()
def prometheus_backend_port():
port = random_port()
os.environ['PROMETHEUS_BACKEND_PORT'] = str(port)
yield port
del os.environ['PROMETHEUS_BACKEND_PORT']
@pytest.fixture()
def otlp_receiver_port():
port = random_port()
os.environ['OTLP_RECEIVER_PORT'] = str(port)
yield port
del os.environ['OTLP_RECEIVER_PORT']
@pytest.fixture()
def otlp_collector(jaeger_port, prometheus_backend_port, otlp_receiver_port):
file_dir = os.path.dirname(__file__)
os.system(
f"docker-compose -f {os.path.join(file_dir, 'docker-compose.yml')} up -d --remove-orphans"
)
time.sleep(1)
yield
os.system(
f"docker-compose -f {os.path.join(file_dir, 'docker-compose.yml')} down --remove-orphans"
)
@pytest.fixture()
def prometheus_client(prometheus_backend_port, otlp_collector):
return PrometheusConnect(
url=f'http://localhost:{prometheus_backend_port}', disable_ssl=True
)
@pytest.fixture()
def set_metrics_export_interval():
os.environ['OTEL_METRIC_EXPORT_INTERVAL'] = '200' # milliseconds
yield
del os.environ['OTEL_METRIC_EXPORT_INTERVAL']
@pytest.fixture()
def instrumented_services_sharded():
return [
'gateway/rep-0',
'executor0/shard-0/rep-0',
'executor0/shard-1/rep-0',
'executor0/head',
]
@pytest.fixture()
def expected_flow_metric_labels():
return [
'jina_number_of_pending_requests',
'jina_received_request_bytes_bucket',
'jina_received_request_bytes_count',
'jina_received_request_bytes_sum',
'jina_received_response_bytes_bucket',
'jina_received_response_bytes_count',
'jina_received_response_bytes_sum',
'jina_receiving_request_seconds_bucket',
'jina_receiving_request_seconds_count',
'jina_receiving_request_seconds_sum',
'jina_sent_request_bytes_bucket',
'jina_sent_request_bytes_count',
'jina_sent_request_bytes_sum',
'jina_sent_response_bytes_bucket',
'jina_sent_response_bytes_count',
'jina_sent_response_bytes_sum',
'jina_successful_requests',
'jina_failed_requests',
'jina_document_processed',
'jina_receiving_request_seconds_bucket',
'jina_receiving_request_seconds_count',
'jina_receiving_request_seconds_sum',
'jina_sending_request_seconds_bucket',
'jina_sending_request_seconds_count',
'jina_sending_request_seconds_sum',
'jina_process_request_seconds_bucket',
'jina_process_request_seconds_count',
'jina_process_request_seconds_sum',
'executor',
'executor_endpoint',
'runtime_name',
'target_info', # otel or prometheus related
'request_counter', # otel or prometheus related
'up', # otel or prometheus related
]
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, lengths, snr))
assert gradgradcheck(add_noise, (waveform, noise, lengths, snr))
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deterministic and
# can give very small (~1e-16) differences.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.structures import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 2)
self.assertEqual(len(results['data_samples'].ignored_instances), 1)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_samples', results)
self.assertIsInstance(results['data_samples'], DetDataSample)
self.assertIsInstance(results['data_samples'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_samples'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_samples'].gt_instances), 3)
self.assertEqual(len(results['data_samples'].ignored_instances), 0)
self.assertIsInstance(results['data_samples'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_samples'].proposals, InstanceData)
self.assertEqual(len(results['data_samples'].proposals), 2)
self.assertIsInstance(results['data_samples'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
"""Chat generation output classes."""
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
if TYPE_CHECKING:
from typing_extensions import Self
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
try:
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# HACK: Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
elif isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
else:
pass
self.text = text
except (KeyError, AttributeError) as e:
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk.
ChatGeneration chunks can be concatenated with other ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
"""Concatenate two ChatGenerationChunks.
Args:
other: The other ChatGenerationChunk or list of ChatGenerationChunks to
concatenate.
"""
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
elif isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
else:
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)
|
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, Union
from pydantic import model_validator
from langchain_core.messages import BaseMessage, BaseMessageChunk
from langchain_core.outputs.generation import Generation
from langchain_core.utils._merge import merge_dicts
if TYPE_CHECKING:
from typing_extensions import Self
class ChatGeneration(Generation):
"""A single chat generation output.
A subclass of Generation that represents the response from a chat model
that generates chat messages.
The `message` attribute is a structured representation of the chat message.
Most of the time, the message will be of type `AIMessage`.
Users working with chat models will usually access information via either
`AIMessage` (returned from runnable interfaces) or `LLMResult` (available
via callbacks).
"""
text: str = ""
"""*SHOULD NOT BE SET DIRECTLY* The text contents of the output message."""
message: BaseMessage
"""The message output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGeneration"] = "ChatGeneration" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@model_validator(mode="after")
def set_text(self) -> Self:
"""Set the text attribute to be the contents of the message.
Args:
values: The values of the object.
Returns:
The values of the object with the text attribute set.
Raises:
ValueError: If the message is not a string or a list.
"""
try:
text = ""
if isinstance(self.message.content, str):
text = self.message.content
# HACK: Assumes text in content blocks in OpenAI format.
# Uses first text block.
elif isinstance(self.message.content, list):
for block in self.message.content:
if isinstance(block, str):
text = block
break
elif isinstance(block, dict) and "text" in block:
text = block["text"]
break
else:
pass
else:
pass
self.text = text
except (KeyError, AttributeError) as e:
msg = "Error while initializing ChatGeneration"
raise ValueError(msg) from e
return self
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]
class ChatGenerationChunk(ChatGeneration):
"""ChatGeneration chunk, which can be concatenated with other
ChatGeneration chunks.
"""
message: BaseMessageChunk
"""The message chunk output by the chat model."""
# Override type to be ChatGeneration, ignore mypy error as this is intentional
type: Literal["ChatGenerationChunk"] = "ChatGenerationChunk" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "schema", "output"]
def __add__(
self, other: Union[ChatGenerationChunk, list[ChatGenerationChunk]]
) -> ChatGenerationChunk:
if isinstance(other, ChatGenerationChunk):
generation_info = merge_dicts(
self.generation_info or {},
other.generation_info or {},
)
return ChatGenerationChunk(
message=self.message + other.message,
generation_info=generation_info or None,
)
elif isinstance(other, list) and all(
isinstance(x, ChatGenerationChunk) for x in other
):
generation_info = merge_dicts(
self.generation_info or {},
*[chunk.generation_info for chunk in other if chunk.generation_info],
)
return ChatGenerationChunk(
message=self.message + [chunk.message for chunk in other],
generation_info=generation_info or None,
)
else:
msg = (
f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'"
)
raise TypeError(msg)
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [curr_path.parents[1],
curr_path.parents[0] / 'bin',
curr_path.parents[0] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [curr_path,
curr_path.parents[1],
curr_path.parents[0] / 'bin',
curr_path.parents[0] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, cat_boxes, corner2bbox,
distance2bbox, empty_box_as, find_inside_bboxes,
get_box_tensor, get_box_wh, roi2bbox, scale_boxes,
stack_boxes)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes', 'autocast_box_type',
'cat_boxes', 'stack_boxes', 'scale_boxes', 'get_box_wh', 'get_box_tensor',
'empty_box_as'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_boxes import BaseBoxes
from .bbox_overlaps import bbox_overlaps
from .box_type import (autocast_box_type, convert_box_type, get_box_type,
register_box, register_box_converter)
from .horizontal_boxes import HorizontalBoxes
from .transforms import (bbox2corner, bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_project, bbox_rescale,
bbox_xyxy_to_cxcywh, corner2bbox, distance2bbox,
find_inside_bboxes, roi2bbox)
__all__ = [
'bbox_overlaps', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'find_inside_bboxes', 'bbox2corner', 'corner2bbox', 'bbox_project',
'BaseBoxes', 'convert_box_type', 'get_box_type', 'register_box',
'register_box_converter', 'HorizontalBoxes', 'autocast_box_type'
]
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
word_embedding_dimension: Dimension of the word embeddings (vocab size)
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
SPLADE_POOLING_MODES = (
"sum",
"max",
)
def __init__(
self,
word_embedding_dimension: int,
pooling_strategy: str = "max",
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy"]
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the mofrom ...models.Pooling import Pooling
del.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
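# log1p(relu(x)) = log(1 + max(x, 0)): scores stay non-negative and large
# logits are dampened, which encourages the sparse term weighting SPLADE relies on.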
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
|
from __future__ import annotations
import torch
from torch import nn
# TODO: SAVING LOADING with config.json
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
def __init__(
self,
pooling_strategy: str = "max",
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in ["max", "sum"]:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the mofrom ...models.Pooling import Pooling
del.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
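# log1p(relu(x)) = log(1 + max(x, 0)): scores stay non-negative and large
# logits are dampened, which encourages the sparse term weighting SPLADE relies on.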
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the SPLADE embeddings (vocabulary size)"""
# This will be set by the MLMTransformer
return self.auto_model.config.vocab_size if hasattr(self, "auto_model") else None
|
_base_ = './ms-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
from typing import Dict
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's
quite slow to draw using matplotlib.
"""
def __init__(self, rounds: int) -> None:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines: Dict[str, plt.Line2D] = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data: str, metric: str) -> str:
return f"{data}-{metric}"
def after_iteration(
self, model: xgb.Booster, epoch: int, evals_log: Dict[str, dict]
) -> bool:
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
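# Pad the metric history with zeros so the y-data always matches the
# fixed-length x axis (self.rounds points).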
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback() -> None:
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback() -> None:
"""Demo for using the checkpoint callback. Custom logic for handling output is
usually required and users are encouraged to define their own callback for
checkpointing operations. The builtin one can be used as a starting point.
"""
# Only for demo, set a larger value (like 100) in practice as checkpointing is quite
# slow.
rounds = 2
def check(as_pickle: bool) -> None:
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(
tmpdir,
f"model_{i}.{xgb.callback.TrainingCheckPoint.default_format}",
)
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
from typing import Dict
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's
quite slow to draw using matplotlib.
"""
def __init__(self, rounds: int) -> None:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines: Dict[str, plt.Line2D] = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data: str, metric: str) -> str:
return f"{data}-{metric}"
def after_iteration(
self, model: xgb.Booster, epoch: int, evals_log: Dict[str, dict]
) -> bool:
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
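# Pad the metric history with zeros so the y-data always matches the
# fixed-length x axis (self.rounds points).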
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback() -> None:
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback() -> None:
"""Demo for using the checkpoint callback. Custom logic for handling output is
usually required and users are encouraged to define their own callback for
checkpointing operations. The builtin one can be used as a starting point.
"""
# Only for demo, set a larger value (like 100) in practice as checkpointing is quite
# slow.
rounds = 2
def check(as_pickle: bool) -> None:
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(
tmpdir,
f"model_{i}.{xgb.callback.TrainingCheckPoint.default_format}",
)
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize as deserialize
from keras.src.constraints import get as get
from keras.src.constraints import serialize as serialize
from keras.src.constraints.constraints import Constraint as Constraint
from keras.src.constraints.constraints import MaxNorm as MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm as MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg as NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm as UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.constraints import deserialize
from keras.src.constraints import get
from keras.src.constraints import serialize
from keras.src.constraints.constraints import Constraint
from keras.src.constraints.constraints import MaxNorm
from keras.src.constraints.constraints import MaxNorm as max_norm
from keras.src.constraints.constraints import MinMaxNorm
from keras.src.constraints.constraints import MinMaxNorm as min_max_norm
from keras.src.constraints.constraints import NonNeg
from keras.src.constraints.constraints import NonNeg as non_neg
from keras.src.constraints.constraints import UnitNorm
from keras.src.constraints.constraints import UnitNorm as unit_norm
|
"""DeepLake multimodal Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
from llama_index.core.schema import BaseNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
class DeepLakeMultimodalRetrieverPack(BaseLlamaPack):
"""DeepLake Multimodal retriever pack."""
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
overwrite: bool = False,
verbose: bool = True,
nodes: Optional[List[BaseNode]] = None,
top_k: int = 4,
**kwargs: Any,
):
# text vector store
self._text_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_text",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
# image vector store
self._image_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_image",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex(
nodes,
storage_context=self._storage_context,
image_vector_store=self._image_vectorstore,
)
else:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex.from_vector_store(
self._text_vectorstore,
image_vector_store=self._image_vectorstore,
)
self.retriever = self._index.as_retriever(
similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
)
self.query_engine = SimpleMultiModalQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"text_vectorstore": self._text_vectorstore,
"image_vectorstore": self._image_vectorstore,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.query_engine.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""DeepLake multimodal Retrieval Pack."""
from typing import Any, Dict, List, Optional
from llama_index.core.indices.multi_modal import MultiModalVectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.query_engine import SimpleMultiModalQueryEngine
from llama_index.core.schema import BaseNode
from llama_index.core.storage.storage_context import StorageContext
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
class DeepLakeMultimodalRetrieverPack(BaseLlamaPack):
"""DeepLake Multimodal retriever pack."""
def __init__(
self,
dataset_path: str = "llama_index",
token: Optional[str] = None,
read_only: Optional[bool] = False,
overwrite: bool = False,
verbose: bool = True,
nodes: Optional[List[BaseNode]] = None,
top_k: int = 4,
**kwargs: Any,
):
# text vector store
self._text_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_text",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
# image vector store
self._image_vectorstore = DeepLakeVectorStore(
dataset_path=dataset_path + "_image",
token=token,
read_only=read_only,
overwrite=overwrite,
verbose=verbose,
)
if nodes is not None:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex(
nodes,
storage_context=self._storage_context,
image_vector_store=self._image_vectorstore,
)
else:
self._storage_context = StorageContext.from_defaults(
vector_store=self._text_vectorstore
)
self._index = MultiModalVectorStoreIndex.from_vector_store(
self._text_vectorstore,
image_vector_store=self._image_vectorstore,
)
self.retriever = self._index.as_retriever(
similarity_top_k=top_k, vector_store_kwargs={"deep_memory": True}
)
self.query_engine = SimpleMultiModalQueryEngine(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"text_vectorstore": self._text_vectorstore,
"image_vectorstore": self._image_vectorstore,
"storage_context": self._storage_context,
"index": self._index,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def retrieve(self, query_str: str) -> Any:
"""Retrieve."""
return self.query_engine.retrieve(query_str)
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
from jina_cli.export import api_to_dict
def _build_lookup_table():
all_keywords = {}
import copy
def build_invert_index(d, usage='jina'):
for k in d['methods']:
usg = f'{usage} {k["name"]}'
if 'methods' in k:
build_invert_index(k, usage=usg)
if k['name'] not in all_keywords:
all_keywords[k['name']] = []
_k = {'name': k['name'], 'type': 'command', 'usage': usg, 'help': k['help']}
all_keywords[k['name']].append(_k)
if 'options' in k:
for kk in k['options']:
if kk['name'] not in all_keywords:
all_keywords[kk['name']] = []
_kk = copy.deepcopy(kk)
_kk['usage'] = usg
all_keywords[kk['name']].append(_kk)
def build_noisy_index(d):
noise2key = {}
for k, z in d.items():
for v in z:
noises = [k]
noises.append(v.get('name', []))
noises.extend(v.get('option_strings', []))
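# Build fuzzy spellings of every keyword: dashes/underscores turned into
# spaces or dropped, leading '--' stripped, and a de-pluralized form, so a
# slightly misspelled query still maps back to the canonical argument.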
dash_to_space = [k.replace('-', ' ').replace('_', ' ') for k in noises]
no_dash = [k.replace('-', '').replace('_', '') for k in noises]
no_leading_dash = [k.replace('--', '') for k in noises]
noises.extend(dash_to_space)
noises.extend(no_dash)
noises.extend(no_leading_dash)
no_ending_plural = [k[:-1] if k.endswith('s') else k for k in noises]
noises.extend(no_ending_plural)
for kk in set(noises):
noise2key[kk] = k
return noise2key
build_invert_index(api_to_dict(show_all_args=True))
nkw2kw = build_noisy_index(all_keywords)
return nkw2kw, all_keywords
def _prettyprint_help(d, also_in=None):
from jina.helper import colored
if d['type'] == 'command':
print(
f'''
{colored(d['name'], attrs='bold')} is a CLI command of Jina.
{colored(d['help'], attrs='bold')}
More info: {d['usage']} --help
'''
)
else:
availables = ' '.join(
colored(v, attrs='underline')
for v in (set(h['usage'] for h in also_in) if also_in else {d['usage']})
)
option_str = ' '.join(colored(v, attrs='bold') for v in d['option_strings'])
if option_str:
option_str = f'as {option_str}'
table = {}
table['Type'] = d['type']
table['Required'] = d['required']
if d['choices']:
table['Choices'] = ' | '.join(d['choices'])
if not d['default_random'] and d['default'] is not None:
table['Default'] = d['default']
if d['default_random']:
table['Remark'] = colored(
'This argument has a random default value!', 'yellow'
)
table_str = '\n '.join(
f'{k + ": " + colored(v, attrs="bold")}' for k, v in table.items()
)
lb = '\033[F'
import argparse
print(
f'''
{colored(d['name'], attrs='bold')} is {colored('an internal CLI of Jina, should not be used directly', color='yellow') if d['help'] == argparse.SUPPRESS else 'a CLI argument of Jina'}.
It is available in {availables} {option_str}
{colored(d['help'], attrs='bold') if d['help'] != argparse.SUPPRESS else lb * 2}
{table_str}
'''
)
def lookup_and_print(query: str):
"""Lookup argument name in Jina API and prettyprint the result.
:param query: the argument (fuzzy) name
"""
nkw2kw, kw2info = _build_lookup_table()
if query not in nkw2kw:
from jina.helper import colored
print(
f'Can not find argument {colored(query, attrs="bold")}, '
f'maybe it\'s a misspelling or Jina does not have this argument.'
)
else:
helps = kw2info[nkw2kw[query]] # type: list
if len(helps) == 1:
_prettyprint_help(helps[0])
elif len(helps) > 1 and len(set(h['help'] for h in helps)) == 1:
_prettyprint_help(helps[0], also_in=helps)
elif len(helps) > 1:
from collections import defaultdict
from jina.helper import colored
help_group = defaultdict(list)
for h in helps:
help_group[h['help']].append(h)
print(
colored(f'Found {len(help_group)} mentions in Jina API.', attrs='dark')
)
for hg in help_group.values():
_prettyprint_help(hg[0], also_in=hg)
print(colored('─' * 40, attrs='dark'))
|
from jina_cli.export import api_to_dict
def _build_lookup_table():
all_keywords = {}
import copy
def build_invert_index(d, usage='jina'):
for k in d['methods']:
usg = f'{usage} {k["name"]}'
if 'methods' in k:
build_invert_index(k, usage=usg)
if k['name'] not in all_keywords:
all_keywords[k['name']] = []
_k = {'name': k['name'], 'type': 'command', 'usage': usg, 'help': k['help']}
all_keywords[k['name']].append(_k)
if 'options' in k:
for kk in k['options']:
if kk['name'] not in all_keywords:
all_keywords[kk['name']] = []
_kk = copy.deepcopy(kk)
_kk['usage'] = usg
all_keywords[kk['name']].append(_kk)
def build_noisy_index(d):
noise2key = {}
for k, z in d.items():
for v in z:
noises = [k]
noises.append(v.get('name', []))
noises.extend(v.get('option_strings', []))
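# Build fuzzy spellings of every keyword: dashes/underscores turned into
# spaces or dropped, leading '--' stripped, and a de-pluralized form, so a
# slightly misspelled query still maps back to the canonical argument.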
dash_to_space = [k.replace('-', ' ').replace('_', ' ') for k in noises]
no_dash = [k.replace('-', '').replace('_', '') for k in noises]
no_leading_dash = [k.replace('--', '') for k in noises]
noises.extend(dash_to_space)
noises.extend(no_dash)
noises.extend(no_leading_dash)
no_ending_plural = [k[:-1] if k.endswith('s') else k for k in noises]
noises.extend(no_ending_plural)
for kk in set(noises):
noise2key[kk] = k
return noise2key
build_invert_index(api_to_dict(show_all_args=True))
nkw2kw = build_noisy_index(all_keywords)
return nkw2kw, all_keywords
def _prettyprint_help(d, also_in=None):
from jina.helper import colored
if d['type'] == 'command':
print(
f'''
{colored(d['name'], attrs='bold')} is a CLI command of Jina.
{colored(d['help'], attrs='bold')}
More info: {d['usage']} --help
'''
)
else:
availables = ' '.join(
colored(v, attrs='underline')
for v in (set(h['usage'] for h in also_in) if also_in else {d['usage']})
)
option_str = ' '.join(colored(v, attrs='bold') for v in d['option_strings'])
if option_str:
option_str = f'as {option_str}'
table = {}
table['Type'] = d['type']
table['Required'] = d['required']
if d['choices']:
table['Choices'] = ' | '.join(d['choices'])
if not d['default_random'] and d['default'] is not None:
table['Default'] = d['default']
if d['default_random']:
table['Remark'] = colored(
'This argument has a random default value!', 'on yellow'
)
table_str = '\n '.join(
f'{k + ": " + colored(v, attrs="bold")}' for k, v in table.items()
)
lb = '\033[F'
import argparse
print(
f'''
{colored(d['name'], attrs='bold')} is {colored('an internal CLI of Jina, should not be used directly', color='yellow') if d['help'] == argparse.SUPPRESS else 'a CLI argument of Jina'}.
It is available in {availables} {option_str}
{colored(d['help'], attrs='bold') if d['help'] != argparse.SUPPRESS else lb * 2}
{table_str}
'''
)
def lookup_and_print(query: str):
"""Lookup argument name in Jina API and prettyprint the result.
:param query: the argument (fuzzy) name
"""
nkw2kw, kw2info = _build_lookup_table()
if query not in nkw2kw:
from jina.helper import colored
print(
f'Can not find argument {colored(query, attrs="bold")}, '
f'maybe it\'s a misspelling or Jina does not have this argument.'
)
else:
helps = kw2info[nkw2kw[query]] # type: list
if len(helps) == 1:
_prettyprint_help(helps[0])
elif len(helps) > 1 and len(set(h['help'] for h in helps)) == 1:
_prettyprint_help(helps[0], also_in=helps)
elif len(helps) > 1:
from collections import defaultdict
from jina.helper import colored
help_group = defaultdict(list)
for h in helps:
help_group[h['help']].append(h)
print(
colored(f'Found {len(help_group)} mentions in Jina API.', attrs='dark')
)
for hg in help_group.values():
_prettyprint_help(hg[0], also_in=hg)
print(colored('─' * 40, attrs='dark'))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.volcengine_maas import (
VolcEngineMaasChat,
convert_dict_to_message,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_dict_to_message": "langchain_community.chat_models.volcengine_maas",
"VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"VolcEngineMaasChat",
"convert_dict_to_message",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.volcengine_maas import (
VolcEngineMaasChat,
convert_dict_to_message,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"convert_dict_to_message": "langchain_community.chat_models.volcengine_maas",
"VolcEngineMaasChat": "langchain_community.chat_models.volcengine_maas",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"convert_dict_to_message",
"VolcEngineMaasChat",
]
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
},
skip_img_without_anno=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
albu_train_transforms = [
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(
type='OneOf',
transforms=[
dict(
type='RGBShift',
r_shift_limit=10,
g_shift_limit=10,
b_shift_limit=10,
p=1.0),
dict(
type='HueSaturationValue',
hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20,
p=1.0)
],
p=0.1),
dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_bboxes_labels', 'gt_ignore_flags'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
},
skip_img_without_anno=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tracemalloc
from functools import wraps
from docarray import DocList
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocList[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""Decorator to profile memory usage of a function.
Returns:
original function return value, (current memory usage, peak memory usage)
"""
@wraps(func)
def _inner(*args, **kwargs):
tracemalloc.start()
ret = func(*args, **kwargs)
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return ret, (current, peak)
return _inner
|
import tracemalloc
from functools import wraps
from docarray import DocList
from docarray.documents import TextDoc
def get_test_da(n: int):
return DocList[TextDoc](gen_text_docs(n))
def gen_text_docs(n: int):
for i in range(n):
yield TextDoc(text=f'text {i}')
def profile_memory(func):
"""Decorator to profile memory usage of a function.
Returns:
original function return value, (current memory usage, peak memory usage)
"""
@wraps(func)
def _inner(*args, **kwargs):
tracemalloc.start()
ret = func(*args, **kwargs)
current, peak = tracemalloc.get_traced_memory()
tracemalloc.stop()
return ret, (current, peak)
return _inner
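# A minimal usage sketch (not part of the original module): profile the memory
# cost of building a DocList; 1_000 is an arbitrary example size.
if __name__ == '__main__':
    docs, (current, peak) = profile_memory(get_test_da)(1_000)
    print(f'current: {current / 1024:.1f} KiB, peak: {peak / 1024:.1f} KiB')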
|
ac_file = '../jina_cli/autocomplete.py'
def _update_autocomplete():
from jina.parsers import get_main_parser
def _gaa(key, parser):
_result = {}
_compl = []
for v in parser._actions:
if v.option_strings:
_compl.extend(v.option_strings)
elif v.choices:
_compl.extend(v.choices)
if isinstance(v.choices, dict):
for kk, vv in v.choices.items():
_result.update(_gaa(' '.join([key, kk]).strip(), vv))
        # filter out single-dash options, as they serve as abbreviations
_compl = [k for k in _compl if (not k.startswith('-') or k.startswith('--'))]
_result.update({key: _compl})
return _result
compl = _gaa('', get_main_parser())
cmd = compl.pop('')
compl = {'commands': cmd, 'completions': compl}
with open(ac_file, 'w', encoding='utf-8') as fp:
fp.write(f'ac_table = {compl}\n')
if __name__ == '__main__':
_update_autocomplete()
|
ac_file = '../jina_cli/autocomplete.py'
def _update_autocomplete():
from jina.parsers import get_main_parser
def _gaa(key, parser):
_result = {}
_compl = []
for v in parser._actions:
if v.option_strings:
_compl.extend(v.option_strings)
elif v.choices:
_compl.extend(v.choices)
if isinstance(v.choices, dict):
for kk, vv in v.choices.items():
_result.update(_gaa(' '.join([key, kk]).strip(), vv))
        # filter out single-dash options, as they serve as abbreviations
_compl = [k for k in _compl if (not k.startswith('-') or k.startswith('--'))]
_result.update({key: _compl})
return _result
compl = _gaa('', get_main_parser())
cmd = compl.pop('')
compl = {'commands': cmd, 'completions': compl}
with open(ac_file, 'w') as fp:
fp.write(f'ac_table = {compl}\n')
if __name__ == '__main__':
_update_autocomplete()
|
import types
from typing import TYPE_CHECKING
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__ = ['AudioNdArray', 'AudioTensor']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'AudioTorchTensor':
import_library('torch', raise_error=True)
import docarray.typing.tensor.audio.audio_torch_tensor as lib
elif name == 'AudioTensorFlowTensor':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.audio.audio_tensorflow_tensor as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
__all__ = ['AudioNdArray']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__.extend(['AudioTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
__all__.extend(['AudioTensorFlowTensor'])
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with in-batch negatives, every positive pair uses the other sentences in the batch as negatives
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=1,
warmup_steps=100,
use_amp=True, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from datetime import datetime
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. We use a batch size of 128; with in-batch negatives, every positive pair uses the other sentences in the batch as negatives
# Sentences are truncated to 75 word pieces
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
epochs = 1
max_seq_length = 75
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
output_path = "output/train_askubuntu_ct-improved-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
# As loss, we use losses.ContrastiveTensionLossInBatchNegatives
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=1,
warmup_steps=100,
use_amp=True, # Set to True, if your GPU has optimized FP16 cores
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINAppOps
from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AppOperationType": "langchain_community.tools.ainetwork.app",
"AppSchema": "langchain_community.tools.ainetwork.app",
"AINAppOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINAppOps",
"AppOperationType",
"AppSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINAppOps
from langchain_community.tools.ainetwork.app import AppOperationType, AppSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AppOperationType": "langchain_community.tools.ainetwork.app",
"AppSchema": "langchain_community.tools.ainetwork.app",
"AINAppOps": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AppOperationType",
"AppSchema",
"AINAppOps",
]
|
from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
    :return: A boolean value - 'True' if 'x' is a subclass of 'a_tuple', 'False' otherwise.
        Note that if the origin of 'x' is a list, tuple, dict or set, or if 'x' is a TypeVar, the function immediately returns 'False'.
"""
if (get_origin(x) in (list, tuple, dict, set)) or is_typevar(x):
return False
return issubclass(x, a_tuple)
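# A minimal usage sketch (assumption, not part of the original module): the
# helpers above distinguish tensor types, optional tensor unions and
# non-class inputs.
if __name__ == '__main__':
    from docarray.typing import NdArray

    assert is_type_tensor(NdArray)
    assert is_tensor_union(Optional[NdArray])
    assert not safe_issubclass(list, AbstractTensor)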
|
from typing import Any, Optional
from typing_inspect import get_args, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
|
"""Argparser module for hub push"""
import argparse
import os
from jina.parsers.helper import add_arg_group
def mixin_hub_push_parser(parser):
"""Add the arguments for hub push to the parser
    :param parser: the parser to configure
"""
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help='If set, more information will be printed.',
)
gp = add_arg_group(parser, title='Push')
gp.add_argument(
'path',
type=dir_path,
help='The Executor folder to be pushed to Jina Hub',
)
gp.add_argument(
'-f',
'--dockerfile',
metavar='DOCKERFILE',
help='The file path to the Dockerfile (default is `${cwd}/Dockerfile`)',
)
gp.add_argument(
'-t',
'--tag',
action='append',
help='''
A list of tags. One can use it to distinguish architecture (e.g. `cpu`, `gpu`) or versions (e.g. `v1`, `v2`).
One can later fetch a tagged Executor via `jinahub[+docker]://MyExecutor/gpu`
''',
)
gp.add_argument(
'--protected-tag',
action='append',
help='A list of protected tags. Like tag but protected against updates after first push.',
)
gp.add_argument(
'--force-update',
'--force',
type=str,
help='If set, push will overwrite the Executor on the Hub that shares the same NAME or UUID8 identifier',
)
gp.add_argument(
'--build-env',
type=str,
help='A list of environment variables. It will be used in project build phase.',
)
gp.add_argument(
'--secret',
type=str,
        help='The secret for overwriting a Hub executor',
)
gp.add_argument(
'--no-cache',
action='store_true',
default=False,
help='If set, "--no-cache" option will be added to the Docker build.',
)
gp = add_arg_group(parser, title='Visibility')
mutually_exclusive_group = gp.add_mutually_exclusive_group()
mutually_exclusive_group.add_argument(
'--public',
action='store_true',
default=argparse.SUPPRESS,
help='If set, the pushed executor is visible to public',
)
mutually_exclusive_group.add_argument(
'--private',
action='store_true',
default=argparse.SUPPRESS,
help='If set, the pushed executor is invisible to public',
)
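# A minimal usage sketch (assumption, not part of the original module): attach
# the mixin to a standalone parser and parse example arguments.
if __name__ == '__main__':
    _parser = argparse.ArgumentParser()
    mixin_hub_push_parser(_parser)
    print(_parser.parse_args(['.', '-t', 'cpu', '--verbose']))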
|
"""Argparser module for hub push"""
import argparse
import os
from jina.parsers.helper import add_arg_group
def mixin_hub_push_parser(parser):
"""Add the arguments for hub push to the parser
    :param parser: the parser to configure
"""
def dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
parser.add_argument(
'--verbose',
action='store_true',
default=False,
help='If set, more information will be printed.',
)
gp = add_arg_group(parser, title='Push')
gp.add_argument(
'path',
type=dir_path,
help='The Executor folder to be pushed to Jina Hub',
)
gp.add_argument(
'-f',
'--dockerfile',
metavar='DOCKERFILE',
help='The file path to the Dockerfile (default is `${cwd}/Dockerfile`)',
)
gp.add_argument(
'-t',
'--tag',
action='append',
help='''
A list of tags. One can use it to distinguish architecture (e.g. `cpu`, `gpu`) or versions (e.g. `v1`, `v2`).
One can later fetch a tagged Executor via `jinahub[+docker]://MyExecutor/gpu`
''',
)
gp.add_argument(
'--protected-tag',
action='append',
help='A list of protected tags. Like tag but protected against updates after first push.',
)
gp.add_argument(
'--force-update',
'--force',
type=str,
help='If set, push will overwrite the Executor on the Hub that shares the same NAME or UUID8 identifier',
)
gp.add_argument(
'--secret',
type=str,
        help='The secret for overwriting a Hub executor',
)
gp.add_argument(
'--no-cache',
action='store_true',
default=False,
help='If set, "--no-cache" option will be added to the Docker build.',
)
gp = add_arg_group(parser, title='Visibility')
mutually_exclusive_group = gp.add_mutually_exclusive_group()
mutually_exclusive_group.add_argument(
'--public',
action='store_true',
default=argparse.SUPPRESS,
help='If set, the pushed executor is visible to public',
)
mutually_exclusive_group.add_argument(
'--private',
action='store_true',
default=argparse.SUPPRESS,
help='If set, the pushed executor is invisible to public',
)
|
import logging
import os
from functools import cache
from autogpt_libs.utils.cache import thread_cached
from dotenv import load_dotenv
from redis import Redis
from redis.asyncio import Redis as AsyncRedis
from backend.util.retry import conn_retry
load_dotenv()
HOST = os.getenv("REDIS_HOST", "localhost")
PORT = int(os.getenv("REDIS_PORT", "6379"))
PASSWORD = os.getenv("REDIS_PASSWORD", "password")
logger = logging.getLogger(__name__)
@conn_retry("Redis", "Acquiring connection")
def connect() -> Redis:
c = Redis(
host=HOST,
port=PORT,
password=PASSWORD,
decode_responses=True,
)
c.ping()
return c
@conn_retry("Redis", "Releasing connection")
def disconnect():
get_redis().close()
@cache
def get_redis() -> Redis:
return connect()
@conn_retry("AsyncRedis", "Acquiring connection")
async def connect_async() -> AsyncRedis:
c = AsyncRedis(
host=HOST,
port=PORT,
password=PASSWORD,
decode_responses=True,
)
await c.ping()
return c
@conn_retry("AsyncRedis", "Releasing connection")
async def disconnect_async():
c = await get_redis_async()
await c.close()
@thread_cached
async def get_redis_async() -> AsyncRedis:
return await connect_async()
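# A minimal usage sketch (assumption: a Redis server is reachable with the
# credentials above); exercises the cached synchronous client.
if __name__ == "__main__":
    r = get_redis()
    r.set("example-key", "example-value")
    print(r.get("example-key"))
    disconnect()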
|
import logging
import os
from dotenv import load_dotenv
from redis import Redis
from redis.asyncio import Redis as AsyncRedis
from backend.util.retry import conn_retry
load_dotenv()
HOST = os.getenv("REDIS_HOST", "localhost")
PORT = int(os.getenv("REDIS_PORT", "6379"))
PASSWORD = os.getenv("REDIS_PASSWORD", "password")
logger = logging.getLogger(__name__)
connection: Redis | None = None
connection_async: AsyncRedis | None = None
@conn_retry("Redis", "Acquiring connection")
def connect() -> Redis:
global connection
if connection:
return connection
c = Redis(
host=HOST,
port=PORT,
password=PASSWORD,
decode_responses=True,
)
c.ping()
connection = c
return connection
@conn_retry("Redis", "Releasing connection")
def disconnect():
global connection
if connection:
connection.close()
connection = None
def get_redis(auto_connect: bool = True) -> Redis:
if connection:
return connection
if auto_connect:
return connect()
raise RuntimeError("Redis connection is not established")
@conn_retry("AsyncRedis", "Acquiring connection")
async def connect_async() -> AsyncRedis:
global connection_async
if connection_async:
return connection_async
c = AsyncRedis(
host=HOST,
port=PORT,
password=PASSWORD,
decode_responses=True,
)
await c.ping()
connection_async = c
return connection_async
@conn_retry("AsyncRedis", "Releasing connection")
async def disconnect_async():
global connection_async
if connection_async:
await connection_async.close()
connection_async = None
async def get_redis_async(auto_connect: bool = True) -> AsyncRedis:
if connection_async:
return connection_async
if auto_connect:
return await connect_async()
raise RuntimeError("AsyncRedis connection is not established")
|
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
from torchaudio._internal.module_utils import dropping_support, dropping_class_support
_SUBSETS = ["music", "noise", "speech"]
_SAMPLE_RATE = 16_000
@dropping_class_support
class Musan(Dataset):
r"""*MUSAN* :cite:`musan2015` dataset.
Args:
root (str or Path): Root directory where the dataset's top-level directory exists.
subset (str): Subset of the dataset to use. Options: [``"music"``, ``"noise"``, ``"speech"``].
"""
def __init__(self, root: Union[str, Path], subset: str):
if subset not in _SUBSETS:
raise ValueError(f"Invalid subset '{subset}' given. Please provide one of {_SUBSETS}")
subset_path = Path(root) / subset
self._walker = [str(p) for p in subset_path.glob("*/*.*")]
def get_metadata(self, n: int) -> Tuple[str, int, str]:
r"""Get metadata for the n-th sample in the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): Index of sample to be loaded.
Returns:
(str, int, str):
str
Path to audio.
int
Sample rate.
str
File name.
"""
audio_path = self._walker[n]
return audio_path, _SAMPLE_RATE, Path(audio_path).name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
r"""Return the n-th sample in the dataset.
Args:
n (int): Index of sample to be loaded.
Returns:
(torch.Tensor, int, str):
torch.Tensor
Waveform.
int
Sample rate.
str
File name.
"""
audio_path, sample_rate, filename = self.get_metadata(n)
path = Path(audio_path)
return _load_waveform(path.parent, path.name, sample_rate), sample_rate, filename
def __len__(self) -> int:
return len(self._walker)
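# A minimal usage sketch (assumption: the MUSAN corpus has been downloaded and
# extracted to ./MUSAN); loads one sample from the "noise" subset.
if __name__ == "__main__":
    dataset = Musan("./MUSAN", subset="noise")
    waveform, sample_rate, filename = dataset[0]
    print(filename, sample_rate, waveform.shape)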
|
from pathlib import Path
from typing import Tuple, Union
import torch
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _load_waveform
from torchaudio._internal.module_utils import dropping_support
_SUBSETS = ["music", "noise", "speech"]
_SAMPLE_RATE = 16_000
class Musan(Dataset):
r"""*MUSAN* :cite:`musan2015` dataset.
Args:
root (str or Path): Root directory where the dataset's top-level directory exists.
subset (str): Subset of the dataset to use. Options: [``"music"``, ``"noise"``, ``"speech"``].
"""
@dropping_support
def __init__(self, root: Union[str, Path], subset: str):
if subset not in _SUBSETS:
raise ValueError(f"Invalid subset '{subset}' given. Please provide one of {_SUBSETS}")
subset_path = Path(root) / subset
self._walker = [str(p) for p in subset_path.glob("*/*.*")]
def get_metadata(self, n: int) -> Tuple[str, int, str]:
r"""Get metadata for the n-th sample in the dataset. Returns filepath instead of waveform,
but otherwise returns the same fields as :py:func:`__getitem__`.
Args:
n (int): Index of sample to be loaded.
Returns:
(str, int, str):
str
Path to audio.
int
Sample rate.
str
File name.
"""
audio_path = self._walker[n]
return audio_path, _SAMPLE_RATE, Path(audio_path).name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
r"""Return the n-th sample in the dataset.
Args:
n (int): Index of sample to be loaded.
Returns:
(torch.Tensor, int, str):
torch.Tensor
Waveform.
int
Sample rate.
str
File name.
"""
audio_path, sample_rate, filename = self.get_metadata(n)
path = Path(audio_path)
return _load_waveform(path.parent, path.name, sample_rate), sample_rate, filename
def __len__(self) -> int:
return len(self._walker)
|
__version__ = '0.33.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.32.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
# Copyright (c) OpenMMLab. All rights reserved.
# config now can have imported modules and defined functions for convenience
import os.path as osp
def func():
return 'string with \tescape\\ characters\n'
test_item1 = [1, 2]
bool_item2 = True
str_item3 = 'test'
dict_item4 = dict(
a={
'c/d': 'path/d',
'f': 's3//f',
6: '2333',
'2333': 'number'
},
b={'8': 543},
c={9: 678},
d={'a': 0},
f=dict(a='69'))
dict_item5 = {'x/x': {'a.0': 233}}
dict_list_item6 = {'x/x': [{'a.0': 1., 'b.0': 2.}, {'c/3': 3.}]}
# Test windows path and escape.
str_item_7 = osp.join(osp.expanduser('~'), 'folder') # with backslash in
str_item_8 = func()
|
# Copyright (c) OpenMMLab. All rights reserved.
test_item1 = [1, 2]
bool_item2 = True
str_item3 = 'test'
dict_item4 = dict(
a={
'c/d': 'path/d',
'f': 's3//f',
6: '2333',
'2333': 'number'
},
b={'8': 543},
c={9: 678},
d={'a': 0},
f=dict(a='69'))
dict_item5 = {'x/x': {'a.0': 233}}
dict_list_item6 = {'x/x': [{'a.0': 1., 'b.0': 2.}, {'c/3': 3.}]}
|
__version__ = '0.14.11'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.10'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
if self.openai_pre_1_0:
condition = results["flagged"]
else:
condition = results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
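# A minimal usage sketch (assumption: OPENAI_API_KEY is set in the
# environment); passes a single string through the moderation endpoint.
if __name__ == "__main__":
    moderation = OpenAIModerationChain()
    print(moderation.invoke({"input": "some text to check"}))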
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
if self.openai_pre_1_0:
condition = results["flagged"]
else:
condition = results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PYDANTIC_MAJOR_VERSION,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if PYDANTIC_MAJOR_VERSION == 2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
if issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
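# A minimal usage sketch (assumption, not part of the original module): parse a
# JSON completion into a small pydantic model.
if __name__ == "__main__":
    from pydantic import BaseModel

    class Joke(BaseModel):
        setup: str
        punchline: str

    parser = PydanticOutputParser(pydantic_object=Joke)
    print(parser.get_format_instructions())
    print(parser.parse('{"setup": "Why?", "punchline": "Because."}'))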
|
"""Output parsers using Pydantic."""
import json
from typing import Annotated, Generic, Optional
import pydantic
from pydantic import SkipValidation
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.pydantic import (
PYDANTIC_MAJOR_VERSION,
PydanticBaseModel,
TBaseModel,
)
class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
"""Parse an output using a pydantic model."""
pydantic_object: Annotated[type[TBaseModel], SkipValidation()] # type: ignore
"""The pydantic model to parse."""
def _parse_obj(self, obj: dict) -> TBaseModel:
if PYDANTIC_MAJOR_VERSION == 2:
try:
if issubclass(self.pydantic_object, pydantic.BaseModel):
return self.pydantic_object.model_validate(obj)
elif issubclass(self.pydantic_object, pydantic.v1.BaseModel):
return self.pydantic_object.parse_obj(obj)
else:
msg = f"Unsupported model version for PydanticOutputParser: \
{self.pydantic_object.__class__}"
raise OutputParserException(msg)
except (pydantic.ValidationError, pydantic.v1.ValidationError) as e:
raise self._parser_exception(e, obj) from e
else: # pydantic v1
try:
return self.pydantic_object.parse_obj(obj)
except pydantic.ValidationError as e:
raise self._parser_exception(e, obj) from e
def _parser_exception(
self, e: Exception, json_object: dict
) -> OutputParserException:
json_string = json.dumps(json_object)
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {json_string}. Got: {e}"
return OutputParserException(msg, llm_output=json_string)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Optional[TBaseModel]:
"""Parse the result of an LLM call to a pydantic object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
Defaults to False.
Returns:
The parsed pydantic object.
"""
try:
json_object = super().parse_result(result)
return self._parse_obj(json_object)
except OutputParserException:
if partial:
return None
raise
def parse(self, text: str) -> TBaseModel:
"""Parse the output of an LLM call to a pydantic object.
Args:
text: The output of the LLM call.
Returns:
The parsed pydantic object.
"""
return super().parse(text)
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self.pydantic_object.model_json_schema().items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return _PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "pydantic"
@property
@override
def OutputType(self) -> type[TBaseModel]:
"""Return the pydantic model."""
return self.pydantic_object
PydanticOutputParser.model_rebuild()
_PYDANTIC_FORMAT_INSTRUCTIONS = """The output should be formatted as a JSON instance that conforms to the JSON schema below.
As an example, for the schema {{"properties": {{"foo": {{"title": "Foo", "description": "a list of strings", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema. The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Here is the output schema:
```
{schema}
```""" # noqa: E501
# Re-exporting types for backwards compatibility
__all__ = [
"PydanticBaseModel",
"PydanticOutputParser",
"TBaseModel",
]
|
from langchain_anthropic.chat_models import (
ChatAnthropic,
ChatAnthropicMessages,
convert_to_anthropic_tool,
)
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = [
"Anthropic",
"AnthropicLLM",
"ChatAnthropic",
"ChatAnthropicMessages",
"convert_to_anthropic_tool",
]
|
from langchain_anthropic.chat_models import (
ChatAnthropic,
ChatAnthropicMessages,
convert_to_anthropic_tool,
)
from langchain_anthropic.llms import Anthropic, AnthropicLLM
__all__ = [
"ChatAnthropicMessages",
"ChatAnthropic",
"convert_to_anthropic_tool",
"Anthropic",
"AnthropicLLM",
]
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin, TypeMap
from ....helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
if self._config.columns is None:
self._config.columns = []
for i in range(len(self._config.columns)):
self._config.columns[i] = (
self._config.columns[i][0],
self._map_type(self._config.columns[i][1]),
)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from ..base.backend import BaseBackendMixin
from ....helper import dataclass_from_dict, filter_dict
if TYPE_CHECKING:
from ....typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {'str': 'TEXT', 'float': 'float', 'int': 'integer'}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from ....math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
**kwargs,
):
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
if self._config.columns is None:
self._config.columns = []
for i in range(len(self._config.columns)):
self._config.columns[i] = (
self._config.columns[i][0],
self._map_type(self._config.columns[i][1]),
)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
from .... import Document
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
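# A minimal usage sketch (not part of the original file above) of how the
# AnnliteConfig fields are typically supplied through DocumentArray's storage
# interface in docarray v1; the 128-dim size and the 'price' column are made-up
# illustration values.
import numpy as np
from docarray import Document, DocumentArray
da = DocumentArray(
    storage='annlite',
    config={'n_dim': 128, 'metric': 'cosine', 'columns': [('price', 'float')]},
)
da.extend(
    Document(embedding=np.random.rand(128).astype('float32'), tags={'price': float(i)})
    for i in range(10)
)
print(len(da))  # delegated to AnnLite's index size via __len__ above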
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
not 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
|
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
not 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
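# A small, self-contained numerical sketch (not part of the original file above) of
# the per-element formula implemented by gaussian_focal_loss, written in plain
# PyTorch so it runs without mmcv/mmdet; the heatmap values are made up.
import torch
def gaussian_focal_loss_ref(pred, gaussian_target, alpha=2.0, gamma=4.0, eps=1e-12):
    # Positives are the exact peaks of the gaussian heatmap (target == 1); every
    # other location is a soft negative, down-weighted by (1 - target) ** gamma.
    pos_weights = gaussian_target.eq(1)
    neg_weights = (1 - gaussian_target).pow(gamma)
    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return pos_loss + neg_loss
pred = torch.tensor([0.9, 0.2, 0.1])    # predicted heatmap scores
target = torch.tensor([1.0, 0.8, 0.0])  # gaussian-smoothed ground truth
print(gaussian_focal_loss_ref(pred, target).mean())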
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseAnglELoss(model), document_regularizer_weight=5e-5, use_document_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseAnglELoss(model), corpus_regularizer_weight=5e-5, use_corpus_regularizer_only=True
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseAnglELoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
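# A minimal sketch (not part of the original file above) of how a downstream config
# would typically inherit these dataset settings and override a few fields; the base
# file name and the overridden values are made up for illustration.
_base_ = ['./coco_instance.py']
train_dataloader = dict(batch_size=4, num_workers=4)
val_evaluator = dict(metric=['bbox'])
test_evaluator = val_evaluator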
|
from langchain_community.document_loaders import BiliBiliLoader
def test_bilibili_loader() -> None:
"""Test Bilibili Loader."""
loader = BiliBiliLoader(
[
"https://www.bilibili.com/video/BV1xt411o7Xu/",
"https://www.bilibili.com/video/av330407025/",
"https://www.bilibili.com/video/BV16b4y1R7wP/?p=5",
]
)
docs = loader.load()
assert len(docs) == 3
assert docs[0].metadata["aid"] == 34218168
assert docs[1].metadata["videos"] == 1
assert docs[2].metadata["pages"][5 - 1]["cid"] == 300059803
assert docs[2].metadata["cid"] == 300048569
|
from langchain_community.document_loaders import BiliBiliLoader
def test_bilibili_loader() -> None:
"""Test Bilibili Loader."""
loader = BiliBiliLoader(
[
"https://www.bilibili.com/video/BV1xt411o7Xu/",
"https://www.bilibili.com/video/av330407025/",
]
)
docs = loader.load()
assert len(docs) == 2
assert docs[0].metadata["aid"] == 34218168
assert docs[1].metadata["videos"] == 1
|
"""
=====================================================
MNIST classification using multinomial logistic + L1
=====================================================
Here we fit a multinomial logistic regression with L1 penalty on a subset of
the MNIST digits classification task. We use the SAGA algorithm for this
purpose: this is a solver that is fast when the number of samples is significantly
larger than the number of features and is able to finely optimize non-smooth
objective functions, which is the case with the l1-penalty. Test accuracy
reaches > 0.8, while the weight vectors remain *sparse* and therefore more easily
*interpretable*.
Note that the accuracy of this l1-penalized linear model is significantly
below what can be reached by an l2-penalized linear model or a non-linear
multi-layer perceptron model on this dataset.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
# Turn down for faster convergence
t0 = time.time()
train_samples = 5000
# Load data from https://www.openml.org/d/554
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
random_state = check_random_state(0)
permutation = random_state.permutation(X.shape[0])
X = X[permutation]
y = y[permutation]
X = X.reshape((X.shape[0], -1))
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_samples, test_size=10000
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Turn up tolerance for faster convergence
clf = LogisticRegression(C=50.0 / train_samples, penalty="l1", solver="saga", tol=0.1)
clf.fit(X_train, y_train)
sparsity = np.mean(clf.coef_ == 0) * 100
score = clf.score(X_test, y_test)
# print('Best C % .4f' % clf.C_)
print("Sparsity with L1 penalty: %.2f%%" % sparsity)
print("Test score with L1 penalty: %.4f" % score)
coef = clf.coef_.copy()
plt.figure(figsize=(10, 5))
scale = np.abs(coef).max()
for i in range(10):
l1_plot = plt.subplot(2, 5, i + 1)
l1_plot.imshow(
coef[i].reshape(28, 28),
interpolation="nearest",
cmap=plt.cm.RdBu,
vmin=-scale,
vmax=scale,
)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l1_plot.set_xlabel(f"Class {i}")
plt.suptitle("Classification vector for...")
run_time = time.time() - t0
print("Example run in %.3f s" % run_time)
plt.show()
|
"""
=====================================================
MNIST classification using multinomial logistic + L1
=====================================================
Here we fit a multinomial logistic regression with L1 penalty on a subset of
the MNIST digits classification task. We use the SAGA algorithm for this
purpose: this is a solver that is fast when the number of samples is significantly
larger than the number of features and is able to finely optimize non-smooth
objective functions, which is the case with the l1-penalty. Test accuracy
reaches > 0.8, while the weight vectors remain *sparse* and therefore more easily
*interpretable*.
Note that the accuracy of this l1-penalized linear model is significantly
below what can be reached by an l2-penalized linear model or a non-linear
multi-layer perceptron model on this dataset.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
# Turn down for faster convergence
t0 = time.time()
train_samples = 5000
# Load data from https://www.openml.org/d/554
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
random_state = check_random_state(0)
permutation = random_state.permutation(X.shape[0])
X = X[permutation]
y = y[permutation]
X = X.reshape((X.shape[0], -1))
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_samples, test_size=10000
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Turn up tolerance for faster convergence
clf = LogisticRegression(C=50.0 / train_samples, penalty="l1", solver="saga", tol=0.1)
clf.fit(X_train, y_train)
sparsity = np.mean(clf.coef_ == 0) * 100
score = clf.score(X_test, y_test)
# print('Best C % .4f' % clf.C_)
print("Sparsity with L1 penalty: %.2f%%" % sparsity)
print("Test score with L1 penalty: %.4f" % score)
coef = clf.coef_.copy()
plt.figure(figsize=(10, 5))
scale = np.abs(coef).max()
for i in range(10):
l1_plot = plt.subplot(2, 5, i + 1)
l1_plot.imshow(
coef[i].reshape(28, 28),
interpolation="nearest",
cmap=plt.cm.RdBu,
vmin=-scale,
vmax=scale,
)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l1_plot.set_xlabel("Class %i" % i)
plt.suptitle("Classification vector for...")
run_time = time.time() - t0
print("Example run in %.3f s" % run_time)
plt.show()
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
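# A minimal sketch (not part of the original file above) of the training/inference
# asymmetry described in the docstring; the input shape and rate are made-up values.
import numpy as np
import keras
layer = keras.layers.Dropout(rate=0.5, seed=0)
x = np.ones((2, 4), dtype="float32")
print(layer(x, training=True))   # ~half the units zeroed, survivors scaled by 1 / (1 - 0.5) = 2
print(layer(x, training=False))  # identity: nothing is dropped at inference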
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmdet.registry import TASK_UTILS
IOU_CALCULATORS = TASK_UTILS
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
warnings.warn(
'``build_iou_calculator`` would be deprecated soon, please use '
'``mmdet.registry.TASK_UTILS.build()`` ')
return TASK_UTILS.build(cfg, default_args=default_args)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
IOU_CALCULATORS = Registry('IoU calculator')
def build_iou_calculator(cfg, default_args=None):
"""Builder of IoU calculator."""
return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
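# A minimal sketch (not part of the original file above); BboxOverlaps2D is the
# standard 2D IoU calculator that mmdet registers under this registry, and the toy
# boxes are made-up values.
import torch
iou_calculator = build_iou_calculator(dict(type='BboxOverlaps2D'))
bboxes1 = torch.tensor([[0., 0., 10., 10.]])
bboxes2 = torch.tensor([[5., 5., 15., 15.]])
print(iou_calculator(bboxes1, bboxes2))  # pairwise IoU matrix of shape (1, 1)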
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import DocList
from docarray.base_doc.doc import BaseDoc
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
def test_nested_to_json(nested_docs):
d = nested_docs.json()
nested_docs.__class__.parse_raw(d)
|
from typing import List, Optional
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
def test_equal_nested_docs():
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
assert nested_docs == nested_docs
@pytest.fixture
def nested_docs():
class SimpleDoc(BaseDoc):
simple_tens: NdArray[10]
class NestedDoc(BaseDoc):
docs: DocList[SimpleDoc]
hello: str = 'world'
nested_docs = NestedDoc(
docs=DocList[SimpleDoc]([SimpleDoc(simple_tens=np.ones(10)) for j in range(2)]),
)
return nested_docs
def test_nested_to_dict(nested_docs):
d = nested_docs.dict()
assert (d['docs'][0]['simple_tens'] == np.ones(10)).all()
def test_nested_to_dict_exclude(nested_docs):
d = nested_docs.dict(exclude={'docs'})
assert 'docs' not in d.keys()
def test_nested_to_dict_exclude_set(nested_docs):
d = nested_docs.dict(exclude={'hello'})
assert 'hello' not in d.keys()
def test_nested_to_dict_exclude_dict(nested_docs):  # todo: change
d = nested_docs.dict(exclude={'hello': True})
assert 'hello' not in d.keys()
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from langchain_core.runnables.base import RunnableBindingBase
from typing_extensions import TypedDict
class OpenAIFunction(TypedDict):
"""A function description for ChatOpenAI"""
name: str
"""The name of the function."""
description: str
"""The description of the function."""
parameters: dict
"""The parameters to the function."""
class OpenAIFunctionsRouter(RunnableBindingBase[BaseMessage, Any]):
"""A runnable that routes to the selected function."""
functions: Optional[list[OpenAIFunction]]
def __init__(
self,
runnables: Mapping[
str,
Union[
Runnable[dict, Any],
Callable[[dict], Any],
],
],
functions: Optional[list[OpenAIFunction]] = None,
):
if functions is not None:
if len(functions) != len(runnables):
msg = "The number of functions does not match the number of runnables."
raise ValueError(msg)
if not all(func["name"] in runnables for func in functions):
msg = "One or more function names are not found in runnables."
raise ValueError(msg)
router = (
JsonOutputFunctionsParser(args_only=False)
| {"key": itemgetter("name"), "input": itemgetter("arguments")}
| RouterRunnable(runnables)
)
super().__init__(bound=router, kwargs={}, functions=functions)
|
from collections.abc import Mapping
from operator import itemgetter
from typing import Any, Callable, Optional, Union
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
from langchain_core.runnables import RouterRunnable, Runnable
from langchain_core.runnables.base import RunnableBindingBase
from typing_extensions import TypedDict
class OpenAIFunction(TypedDict):
"""A function description for ChatOpenAI"""
name: str
"""The name of the function."""
description: str
"""The description of the function."""
parameters: dict
"""The parameters to the function."""
class OpenAIFunctionsRouter(RunnableBindingBase[BaseMessage, Any]):
"""A runnable that routes to the selected function."""
functions: Optional[list[OpenAIFunction]]
def __init__(
self,
runnables: Mapping[
str,
Union[
Runnable[dict, Any],
Callable[[dict], Any],
],
],
functions: Optional[list[OpenAIFunction]] = None,
):
if functions is not None:
if len(functions) != len(runnables):
raise ValueError(
"The number of functions does not match the number of runnables."
)
if not all(func["name"] in runnables for func in functions):
raise ValueError(
"One or more function names are not found in runnables."
)
router = (
JsonOutputFunctionsParser(args_only=False)
| {"key": itemgetter("name"), "input": itemgetter("arguments")}
| RouterRunnable(runnables)
)
super().__init__(bound=router, kwargs={}, functions=functions)
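# A minimal sketch (not part of the original file above) of invoking the router
# directly with a pre-built AIMessage carrying a function_call; the "add" function,
# its schema, and the message contents are made up for illustration.
import json
from langchain_core.messages import AIMessage
router = OpenAIFunctionsRouter(
    runnables={"add": lambda args: args["a"] + args["b"]},
    functions=[
        {
            "name": "add",
            "description": "Add two integers.",
            "parameters": {
                "type": "object",
                "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
                "required": ["a", "b"],
            },
        }
    ],
)
message = AIMessage(
    content="",
    additional_kwargs={"function_call": {"name": "add", "arguments": json.dumps({"a": 1, "b": 2})}},
)
print(router.invoke(message))  # routes to the "add" runnable and prints 3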
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = sox_ext.get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = sox_ext.load_audio_file(str(uri), frame_offset, num_frames, normalize, channels_first, format)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
compression: Optional[Union[torchaudio.io.CodecConfig, float, int]] = None,
) -> None:
if not isinstance(compression, (float, int, type(None))):
raise ValueError(
"SoX backend expects non-`None` value for argument `compression` to be of ",
f"type `float` or `int`, but received value of type {type(compression)}",
)
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
sox_ext.save_audio_file(
str(uri),
src,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
import torchaudio
from .backend import Backend
from .common import AudioMetaData
sox_ext = torchaudio._extension.lazy_import_sox_ext()
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = sox_ext.get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = sox_ext.load_audio_file(uri, frame_offset, num_frames, normalize, channels_first, format)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
compression: Optional[Union[torchaudio.io.CodecConfig, float, int]] = None,
) -> None:
if not isinstance(compression, (float, int, type(None))):
raise ValueError(
"SoX backend expects non-`None` value for argument `compression` to be of ",
f"type `float` or `int`, but received value of type {type(compression)}",
)
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
sox_ext.save_audio_file(
uri,
src,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
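# A minimal sketch (not part of the original file above) of exercising this backend
# through torchaudio's dispatcher; the path is a made-up placeholder, and only real
# file paths work here since file-like objects are rejected above.
import torch
import torchaudio
waveform = torch.rand(1, 16000)  # one second of mono noise at 16 kHz
torchaudio.save("example.wav", waveform, 16000, backend="sox")
loaded, sample_rate = torchaudio.load("example.wav", backend="sox")
print(loaded.shape, sample_rate)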
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner) -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner) -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Empty cache after an iteration.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader.
Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner: object) -> None:
"""Empty cache before an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner: object) -> None:
"""Empty cache after an epoch.
Args:
runner (object): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
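# A minimal sketch (not part of the original file above) of enabling this hook from
# an MMEngine-style config; the values simply restate the defaults of __init__.
custom_hooks = [
    dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True, after_iter=False)
]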
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model)
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
metrics = data_eval(model)
assert metrics[data_eval.primary_metric] > 0.99
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
import csv
import gzip
import os
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_BinaryClassificationEvaluator_find_best_f1_and_threshold() -> None:
"""Tests that the F1 score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
best_f1,
best_precision,
best_recall,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_f1_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_f1score = f1_score(y_true, y_pred_labels)
assert np.abs(best_f1 - sklearn_f1score) < 1e-6
def test_BinaryClassificationEvaluator_find_best_accuracy_and_threshold() -> None:
"""Tests that the Acc score for the computed threshold is correct"""
y_true = np.random.randint(0, 2, 1000)
y_pred_cosine = np.random.randn(1000)
(
max_acc,
threshold,
) = evaluation.BinaryClassificationEvaluator.find_best_acc_and_threshold(
y_pred_cosine, y_true, high_score_more_similar=True
)
y_pred_labels = [1 if pred >= threshold else 0 for pred in y_pred_cosine]
sklearn_acc = accuracy_score(y_true, y_pred_labels)
assert np.abs(max_acc - sklearn_acc) < 1e-6
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model)
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
def test_ParaphraseMiningEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""Tests that the ParaphraseMiningEvaluator can be loaded"""
model = paraphrase_distilroberta_base_v1_model
sentences = {
0: "Hello World",
1: "Hello World!",
2: "The cat is on the table",
3: "On the table the cat is",
}
data_eval = evaluation.ParaphraseMiningEvaluator(sentences, [(0, 1), (2, 3)])
metrics = data_eval(model)
assert metrics[data_eval.primary_metric] > 0.99
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
assert isinstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
losses = detector.forward(batch_inputs, data_samples, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
batch_results = detector.forward(
batch_inputs, data_samples, mode='tensor')
assert isinstance(batch_results, tuple)
|
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
|
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
def is_torch_available():
return torch_imported
|
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_with={'metric': 'euclidean', 'dim': _DIM})
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
with f:
f.index(da)
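        # After indexing two documents, /status should report both as active and none deleted.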
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
def test_save_load(tmp_path):
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM},
)
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
# Index and save
with f:
f.index(da)
f.post(
on='/dump',
target_peapod='hnsw',
parameters={
'dump_path': str(tmp_path),
},
)
# Sanity check - without "dump_path" specified, index is empty
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 0
assert status_ind['count_indexed'] == 0
# Load
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM, 'dump_path': str(tmp_path)},
)
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
# Check that we indeed have same items in index
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
print(result_search)
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
def test_search_limit(tmp_path):
f = Flow().add(name='hnsw', uses=HnswlibSearcher, uses_with={'dim': _DIM})
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
# Index
with f:
f.index(da)
# Search by specifying limit
result_search = f.search(da, return_results=True, parameters={'limit': 1})
for doc in result_search[0].docs:
assert len(doc.matches) == 1
|
import numpy as np
import pytest
from hnswlib_searcher import HnswlibSearcher
from jina import Document, DocumentArray, Flow
_DIM = 10
@pytest.mark.parametrize('uses', ['HnswlibSearcher', 'docker://hnswlibsearcher'])
def test_index_search_flow(uses: str, build_docker_image: str):
f = Flow().add(uses=uses, uses_with={'metric': 'euclidean', 'dim': _DIM})
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
with f:
f.index(da)
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
def test_save_load(tmp_path):
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM},
)
da = DocumentArray(
[
Document(id='a', embedding=np.ones(_DIM) * 1.0),
Document(id='b', embedding=np.ones(_DIM) * 2.0),
]
)
# Index and save
with f:
f.index(da)
f.post(
on='/dump',
target_peapod='hnsw',
parameters={
'dump_path': str(tmp_path),
},
)
# Sanity check - without "dump_path" specified, index is empty
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 0
assert status_ind['count_indexed'] == 0
# Load
f = Flow().add(
name='hnsw',
uses=HnswlibSearcher,
uses_with={'metric': 'euclidean', 'dim': _DIM, 'dump_path': str(tmp_path)},
)
with f:
status_ind = f.post('/status', return_results=True)
status_ind = status_ind[0].data.docs[0].tags
assert status_ind['count_active'] == 2
assert status_ind['count_deleted'] == 0
assert status_ind['count_indexed'] == 2
# Check that we indeed have same items in index
result_search = f.search(da, return_results=True)
result_search = result_search[0].data.docs
assert len(result_search) == 2
print(result_search)
for ind in range(2):
assert result_search[ind].matches[0].id == ('a' if ind == 0 else 'b')
assert result_search[ind].matches[0].scores['euclidean'].value == 0.0
assert result_search[ind].matches[1].id == ('b' if ind == 0 else 'a')
assert result_search[ind].matches[1].scores['euclidean'].value == 10.0
|
from __future__ import annotations
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSentenceReader",
"NLIDataReader",
"STSDataReader",
"STSBenchmarkDataReader",
"TripletReader",
]
|
from .InputExample import InputExample
from .LabelSentenceReader import LabelSentenceReader
from .NLIDataReader import NLIDataReader
from .STSDataReader import STSBenchmarkDataReader, STSDataReader
from .TripletReader import TripletReader
__all__ = [
"InputExample",
"LabelSentenceReader",
"NLIDataReader",
"STSDataReader",
"STSBenchmarkDataReader",
"TripletReader",
]
|
import os
import platform
import tempfile
import pytest
from sentence_transformers import CrossEncoder, SentenceTransformer
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import DatasetDict, load_dataset
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture(scope="session")
def stsb_dataset_dict() -> "DatasetDict":
return load_dataset("mteb/stsbenchmark-sts")
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
import os
import platform
import tempfile
import pytest
from sentence_transformers import SentenceTransformer, CrossEncoder
from sentence_transformers.models import Transformer, Pooling
@pytest.fixture()
def stsb_bert_tiny_model() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def stsb_bert_tiny_model_reused() -> SentenceTransformer:
return SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture()
def paraphrase_distilroberta_base_v1_model() -> SentenceTransformer:
return SentenceTransformer("paraphrase-distilroberta-base-v1")
@pytest.fixture()
def distilroberta_base_ce_model() -> CrossEncoder:
return CrossEncoder("distilroberta-base", num_labels=1)
@pytest.fixture()
def clip_vit_b_32_model() -> SentenceTransformer:
return SentenceTransformer("clip-ViT-B-32")
@pytest.fixture()
def distilbert_base_uncased_model() -> SentenceTransformer:
word_embedding_model = Transformer("distilbert-base-uncased")
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
return model
@pytest.fixture()
def cache_dir():
"""
In the CI environment, we use a temporary directory as `cache_dir`
to avoid keeping the downloaded models on disk after the test.
This is only required for Ubuntu, as we otherwise have disk space issues there.
"""
if os.environ.get("CI", None) and platform.system() == "Linux":
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield None
|
from jina.clients.helper import callback_exec
from jina.proto import jina_pb2_grpc
class StreamRpc:
"""Class that encapsulated the methods required to run a stream rpc call from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
**kwargs
):
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def stream_rpc_with_retry(self):
"""Wraps the stream rpc logic with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaRPCStub(self.channel)
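        # Open the bidirectional streaming call and run the user callbacks on
        # every response before yielding it back to the caller.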
async for resp in stub.Call(
self.req_iter,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
):
callback_exec(
response=resp,
logger=self.logger,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
)
if self.show_progress:
self.p_bar.update()
yield resp
|
from jina.clients.helper import callback_exec
from jina.proto import jina_pb2_grpc
class StreamRpc:
"""Class that encapsulated the methods required to run a stream rpc call from the client. Instantiate a single class
for each client request.
"""
def __init__(
self,
channel,
continue_on_error,
metadata,
on_always,
on_done,
on_error,
p_bar,
req_iter,
max_attempts,
backoff_multiplier,
initial_backoff,
max_backoff,
logger,
show_progress,
compression,
**kwargs
):
self.compression = compression
self.show_progress = show_progress
self.logger = logger
self.max_backoff = max_backoff
self.initial_backoff = initial_backoff
self.backoff_multiplier = backoff_multiplier
self.max_attempts = max_attempts
self.req_iter = req_iter
self.p_bar = p_bar
self.on_error = on_error
self.on_done = on_done
self.on_always = on_always
self.metadata = metadata
self.continue_on_error = continue_on_error
self.channel = channel
self.kwargs = kwargs
async def stream_rpc_with_retry(self):
"""Wraps the stream rpc logic with retry loop based on the retry params.
:yields: Responses received from the target.
"""
stub = jina_pb2_grpc.JinaRPCStub(self.channel)
async for resp in stub.Call(
self.req_iter,
compression=self.compression,
metadata=self.metadata,
credentials=self.kwargs.get('credentials', None),
timeout=self.kwargs.get('timeout', None),
):
callback_exec(
response=resp,
on_error=self.on_error,
on_done=self.on_done,
on_always=self.on_always,
continue_on_error=self.continue_on_error,
logger=self.logger,
)
if self.show_progress:
self.p_bar.update()
yield resp
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.11.0',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gritql")
def find_issue(current: Folder, expected: Folder) -> str:
for current_file, expected_file in zip(current.files, expected.files):
if current_file != expected_file:
if current_file.name != expected_file.name:
return (
f"Files have "
f"different names: {current_file.name} != {expected_file.name}"
)
if isinstance(current_file, Folder) and isinstance(expected_file, Folder):
return find_issue(current_file, expected_file)
if isinstance(current_file, Folder) or isinstance(expected_file, Folder):
return (
f"One of the files is a "
f"folder: {current_file.name} != {expected_file.name}"
)
return "\n".join(
difflib.unified_diff(
current_file.content.splitlines(),
expected_file.content.splitlines(),
fromfile=current_file.name,
tofile=expected_file.name,
),
)
return "Unknown"
@pytest.mark.xfail(reason="grit may not be installed in env")
def test_command_line(tmp_path: Path) -> None:
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path) as td:
before.create_structure(root=Path(td))
# The input is used to force through the confirmation.
result = runner.invoke(app, ["migrate", before.name, "--force"])
if result.exit_code != 0:
raise RuntimeError(result.output)
after = Folder.from_structure(Path(td) / before.name)
if after != expected:
raise ValueError(find_issue(after, expected))
|
from __future__ import annotations
import difflib
from pathlib import Path
import pytest
from typer.testing import CliRunner
from langchain_cli.cli import app
from tests.unit_tests.migrate.cli_runner.cases import before, expected
from tests.unit_tests.migrate.cli_runner.folder import Folder
pytest.importorskip("gritql")
def find_issue(current: Folder, expected: Folder) -> str:
for current_file, expected_file in zip(current.files, expected.files):
if current_file != expected_file:
if current_file.name != expected_file.name:
return (
f"Files have "
f"different names: {current_file.name} != {expected_file.name}"
)
if isinstance(current_file, Folder) and isinstance(expected_file, Folder):
return find_issue(current_file, expected_file)
if isinstance(current_file, Folder) or isinstance(expected_file, Folder):
return (
f"One of the files is a "
f"folder: {current_file.name} != {expected_file.name}"
)
return "\n".join(
difflib.unified_diff(
current_file.content.splitlines(),
expected_file.content.splitlines(),
fromfile=current_file.name,
tofile=expected_file.name,
)
)
return "Unknown"
@pytest.mark.xfail(reason="grit may not be installed in env")
def test_command_line(tmp_path: Path) -> None:
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path) as td:
before.create_structure(root=Path(td))
# The input is used to force through the confirmation.
result = runner.invoke(app, ["migrate", before.name, "--force"])
assert result.exit_code == 0, result.output
after = Folder.from_structure(Path(td) / before.name)
assert after == expected, find_issue(after, expected)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.6'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful for running matplotlib/seaborn with
    parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
    The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.27.5'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful for running matplotlib/seaborn with
    parallel plot generators against the Ubuntu default of ulimit -n 1024 or the OS X El Capitan default of 256.
    The setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.response_chain import (
RESPONSE_TEMPLATE,
APIResponderChain,
APIResponderOutputParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"APIResponderChain": "langchain_community.chains.openapi.response_chain",
"APIResponderOutputParser": "langchain_community.chains.openapi.response_chain",
"RESPONSE_TEMPLATE": "langchain_community.chains.openapi.response_chain",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["RESPONSE_TEMPLATE", "APIResponderChain", "APIResponderOutputParser"]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.response_chain import (
RESPONSE_TEMPLATE,
APIResponderChain,
APIResponderOutputParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"APIResponderChain": "langchain_community.chains.openapi.response_chain",
"APIResponderOutputParser": "langchain_community.chains.openapi.response_chain",
"RESPONSE_TEMPLATE": "langchain_community.chains.openapi.response_chain",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["APIResponderChain", "APIResponderOutputParser", "RESPONSE_TEMPLATE"]
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.openvino import core
from keras.src.backend.openvino import image
from keras.src.backend.openvino import linalg
from keras.src.backend.openvino import math
from keras.src.backend.openvino import nn
from keras.src.backend.openvino import numpy
from keras.src.backend.openvino import random
from keras.src.backend.openvino.core import IS_THREAD_SAFE
from keras.src.backend.openvino.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.openvino.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.openvino.core import Variable
from keras.src.backend.openvino.core import cast
from keras.src.backend.openvino.core import compute_output_spec
from keras.src.backend.openvino.core import cond
from keras.src.backend.openvino.core import convert_to_numpy
from keras.src.backend.openvino.core import convert_to_tensor
from keras.src.backend.openvino.core import is_tensor
from keras.src.backend.openvino.core import random_seed_dtype
from keras.src.backend.openvino.core import shape
from keras.src.backend.openvino.core import vectorized_map
from keras.src.backend.openvino.rnn import cudnn_ok
from keras.src.backend.openvino.rnn import gru
from keras.src.backend.openvino.rnn import lstm
from keras.src.backend.openvino.rnn import rnn
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.openvino import core
from keras.src.backend.openvino import image
from keras.src.backend.openvino import linalg
from keras.src.backend.openvino import math
from keras.src.backend.openvino import nn
from keras.src.backend.openvino import numpy
from keras.src.backend.openvino import random
from keras.src.backend.openvino.core import IS_THREAD_SAFE
from keras.src.backend.openvino.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.openvino.core import Variable
from keras.src.backend.openvino.core import cast
from keras.src.backend.openvino.core import compute_output_spec
from keras.src.backend.openvino.core import cond
from keras.src.backend.openvino.core import convert_to_numpy
from keras.src.backend.openvino.core import convert_to_tensor
from keras.src.backend.openvino.core import is_tensor
from keras.src.backend.openvino.core import random_seed_dtype
from keras.src.backend.openvino.core import shape
from keras.src.backend.openvino.core import vectorized_map
from keras.src.backend.openvino.rnn import cudnn_ok
from keras.src.backend.openvino.rnn import gru
from keras.src.backend.openvino.rnn import lstm
from keras.src.backend.openvino.rnn import rnn
|
from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestFloat32, AutogradTestMixin
class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
device = "cpu"
class AutogradRNNTCPUTest(AutogradTestFloat32, PytorchTestCase):
device = "cpu"
|
from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32
class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
device = "cpu"
class AutogradRNNTCPUTest(AutogradTestFloat32, PytorchTestCase):
device = "cpu"
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
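    # With two runs in flight, streaming from the workflow object itself is
    # ambiguous and is expected to raise.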
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
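    # Resume with the first run's Context so "cur_count" persists across runs.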
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert handler_2.ctx
assert await handler_2.ctx.get("cur_count") == 2
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.session.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert await handler_2.ctx.get("cur_count") == 2
|
from typing import List, Optional
from docarray.base_doc.doc import BaseDoc
def test_base_document_init():
doc = BaseDoc()
assert doc.id is not None
def test_update():
class MyDocument(BaseDoc):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
|
from typing import Optional, List
from docarray.base_document.document import BaseDocument
def test_base_document_init():
doc = BaseDocument()
assert doc.id is not None
def test_update():
class MyDocument(BaseDocument):
content: str
title: Optional[str] = None
tags_: List
doc1 = MyDocument(
content='Core content of the document', title='Title', tags_=['python', 'AI']
)
doc2 = MyDocument(content='Core content updated', tags_=['docarray'])
doc1.update(doc2)
assert doc1.content == 'Core content updated'
assert doc1.title == 'Title'
assert doc1.tags_ == ['python', 'AI', 'docarray']
|