input | output
---|---
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='PanopticFPN',
preprocess_cfg=preprocess_cfg,
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
# Forced to remove NumClassCheckHook
custom_hooks = []
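# Usage sketch (hedged): a config like this one is consumed by mmdet's standard
# training entry point; the config path below is illustrative, not a file this
# repo is known to contain:
#   python tools/train.py configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py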
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
model = dict(
type='PanopticFPN',
img_norm_cfg=img_norm_cfg,
semantic_head=dict(
type='PanopticFPNHead',
num_things_classes=80,
num_stuff_classes=53,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
# Forced to remove NumClassCheckHook
custom_hooks = []
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_efficientnet import *
from .image_processing_efficientnet import *
from .image_processing_efficientnet_fast import *
from .modeling_efficientnet import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
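# Illustrative note on the lazy pattern above: submodules are imported only on first
# attribute access, so (assuming the usual transformers package layout) a line like
#   from transformers.models.efficientnet import EfficientNetConfig
# is what actually triggers loading configuration_efficientnet, not the package import.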
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_efficientnet import *
from .image_processing_efficientnet import *
from .modeling_efficientnet import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_bytes(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_bytes(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_bytes(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_from_to_base64(protocol, compress, show_progress):
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
bytes_da = da.to_base64(
protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].from_base64(
bytes_da, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from mmengine.registry import build_model_from_cfg, build_runner_from_cfg
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processors
LOG_PROCESSORS = Registry('log_processor')
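# Illustrative sketch of how these root registries are used (assumes mmengine's
# Registry API: the @register_module() decorator and build(cfg) dispatching on the
# 'type' key; TinyHead is a made-up example class):
#
#   import torch.nn as nn
#
#   @MODELS.register_module()
#   class TinyHead(nn.Module):
#       def __init__(self, in_channels: int = 256):
#           super().__init__()
#           self.fc = nn.Linear(in_channels, 10)
#
#   head = MODELS.build(dict(type='TinyHead', in_channels=256))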
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processors
LOG_PROCESSORS = Registry('log_processor')
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
class MSELoss(nn.Module):
def __init__(self, model):
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Input:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MSELoss, self).__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from langchain_core.utils.json_schema import (
_dereference_refs_helper,
_infer_skip_keys,
_retrieve_ref,
dereference_refs,
)
__all__ = [
"_dereference_refs_helper",
"_infer_skip_keys",
"_retrieve_ref",
"dereference_refs",
]
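# Illustrative sketch: dereference_refs resolves local "$ref" pointers in a JSON
# schema (the schema below is a made-up example):
#
#   schema = {
#       "type": "object",
#       "properties": {"person": {"$ref": "#/definitions/Person"}},
#       "definitions": {"Person": {"type": "object"}},
#   }
#   resolved = dereference_refs(schema)
#   # resolved["properties"]["person"] now contains the inlined Person schema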
|
from langchain_core.utils.json_schema import (
_dereference_refs_helper,
_infer_skip_keys,
_retrieve_ref,
dereference_refs,
)
__all__ = [
"_retrieve_ref",
"_dereference_refs_helper",
"_infer_skip_keys",
"dereference_refs",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLoss2PPScheme,
NDCGLoss2Scheme,
NoWeighingScheme,
)
from .ListNetLoss import ListNetLoss
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"ListNetLoss",
"LambdaLoss",
"NoWeighingScheme",
"NDCGLoss1Scheme",
"NDCGLoss2Scheme",
"LambdaRankScheme",
"NDCGLoss2PPScheme",
]
|
from __future__ import annotations
from .BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CrossEntropyLoss import CrossEntropyLoss
from .LambdaLoss import (
LambdaLoss,
LambdaRankScheme,
NDCGLoss1Scheme,
NDCGLoss2PPScheme,
NDCGLoss2Scheme,
NoWeighingScheme,
)
from .MarginMSELoss import MarginMSELoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
__all__ = [
"BinaryCrossEntropyLoss",
"CrossEntropyLoss",
"MultipleNegativesRankingLoss",
"CachedMultipleNegativesRankingLoss",
"MarginMSELoss",
"MSELoss",
"LambdaLoss",
"NoWeighingScheme",
"NDCGLoss1Scheme",
"NDCGLoss2Scheme",
"LambdaRankScheme",
"NDCGLoss2PPScheme",
]
|
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
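# Note: this is the common mmdet "2x" recipe: linear warmup over the first 500
# iterations, then the base lr is scaled by gamma=0.1 after epochs 16 and 22.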
|
_base_ = './faster_rcnn_hrnetv2p_w32_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see the LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see below).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
]
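# Hedged sketch of the custom-LLM pattern described in the module docstring
# (EchoLLM is illustrative and not part of this module; `_llm_type` and `_call`
# are the two required overrides on the `LLM` base class):
#
#   class EchoLLM(LLM):
#       @property
#       def _llm_type(self) -> str:
#           return "echo"
#
#       def _call(self, prompt: str, stop=None, run_manager=None, **kwargs) -> str:
#           return prompt  # a toy model that just echoes its input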
|
"""**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see the LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see below).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric, DumpResults
from .utils import get_metric_value
__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value', 'DumpResults']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .evaluator import Evaluator
from .metric import BaseMetric
from .utils import get_metric_value
__all__ = ['BaseMetric', 'Evaluator', 'get_metric_value']
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'AnyEmbedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
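# Note: this optional-dependency pattern keeps the module importable without torch;
# the torch-backed types are appended to __all__ only when `import torch` succeeds.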
|
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
'NdArray',
'AnyTensor',
'Embedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import ChatGPTLoader
from langchain_community.document_loaders.chatgpt import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.chatgpt",
"ChatGPTLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ChatGPTLoader",
"concatenate_rows",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import ChatGPTLoader
from langchain_community.document_loaders.chatgpt import concatenate_rows
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"concatenate_rows": "langchain_community.document_loaders.chatgpt",
"ChatGPTLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"concatenate_rows",
"ChatGPTLoader",
]
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluatorFromDataFrame
if TYPE_CHECKING:
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluatorDataFrame(MSEEvaluatorFromDataFrame):
def __init__(
self,
dataframe: list[dict[str, str]],
teacher_model: SparseEncoder,
combinations: list[tuple[str, str]],
batch_size: int = 8,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
        super().__init__(dataframe, teacher_model, combinations, batch_size, name, write_csv, truncate_dim)
    def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> np.ndarray:
return model.encode(
sentences,
batch_size=self.batch_size,
convert_to_numpy=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluatorFromDataFrame
if TYPE_CHECKING:
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluatorFromDataFrame):
def __init__(
self,
dataframe: list[dict[str, str]],
teacher_model: SparseEncoder,
combinations: list[tuple[str, str]],
batch_size: int = 8,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
        super().__init__(dataframe, teacher_model, combinations, batch_size, name, write_csv, truncate_dim)
    def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> np.ndarray:
return model.encode(
sentences,
batch_size=self.batch_size,
convert_to_numpy=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100)))
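# Note: unlike hard NMS, soft-NMS decays the scores of overlapping boxes rather than
# discarding them outright; detections are then filtered by score_thr as usual.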
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
test_cfg=dict(
rcnn=dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_threshold=0.5),
max_per_img=100)))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
|
import sys
import uuid
from typing import Any, Optional
from uuid import UUID
import pytest
from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.callbacks.manager import (
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.config import RunnableConfig
class AsyncCustomCallbackHandler(AsyncCallbackHandler):
def __init__(self) -> None:
self.events: list[Any] = []
async def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> None:
assert kwargs == {}
self.events.append(
(
name,
data,
run_id,
tags,
metadata,
)
)
def test_custom_event_root_dispatch() -> None:
"""Test adhoc event in a nested chain."""
# This just tests that nothing breaks on the path.
# It shouldn't do anything at the moment, since the tracer isn't configured
# to handle adhoc events.
# Expected behavior is that the event cannot be dispatched
with pytest.raises(RuntimeError):
dispatch_custom_event("event1", {"x": 1})
async def test_async_custom_event_root_dispatch() -> None:
"""Test adhoc event in a nested chain."""
# This just tests that nothing breaks on the path.
# It shouldn't do anything at the moment, since the tracer isn't configured
# to handle adhoc events.
# Expected behavior is that the event cannot be dispatched
with pytest.raises(RuntimeError):
await adispatch_custom_event("event1", {"x": 1})
IS_GTE_3_11 = sys.version_info >= (3, 11)
@pytest.mark.skipif(not IS_GTE_3_11, reason="Requires Python >=3.11")
async def test_async_custom_event_implicit_config() -> None:
"""Test dispatch without passing config explicitly."""
callback = AsyncCustomCallbackHandler()
run_id = uuid.UUID(int=7)
# Typing not working well with RunnableLambda when used as
# a decorator for async functions
@RunnableLambda # type: ignore[arg-type]
async def foo(x: int, config: RunnableConfig) -> int:
assert "callbacks" in config
await adispatch_custom_event("event1", {"x": x})
await adispatch_custom_event("event2", {"x": x})
return x
await foo.ainvoke(
1, # type: ignore[arg-type]
{"callbacks": [callback], "run_id": run_id},
)
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
async def test_async_callback_manager() -> None:
"""Test async callback manager."""
callback = AsyncCustomCallbackHandler()
run_id = uuid.UUID(int=7)
# Typing not working well with RunnableLambda when used as
# a decorator for async functions
@RunnableLambda # type: ignore[arg-type]
async def foo(x: int, config: RunnableConfig) -> int:
await adispatch_custom_event("event1", {"x": x}, config=config)
await adispatch_custom_event("event2", {"x": x}, config=config)
return x
await foo.ainvoke(
1, # type: ignore[arg-type]
{"callbacks": [callback], "run_id": run_id},
)
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
def test_sync_callback_manager() -> None:
"""Test async callback manager."""
class CustomCallbackManager(BaseCallbackHandler):
def __init__(self) -> None:
self.events: list[Any] = []
def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> None:
assert kwargs == {}
self.events.append(
(
name,
data,
run_id,
tags,
metadata,
)
)
callback = CustomCallbackManager()
run_id = uuid.UUID(int=7)
@RunnableLambda
def foo(x: int, config: RunnableConfig) -> int:
dispatch_custom_event("event1", {"x": x})
dispatch_custom_event("event2", {"x": x}, config=config)
return x
foo.invoke(1, {"callbacks": [callback], "run_id": run_id})
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
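# Note: the implicit-config test above is gated on Python >= 3.11 because automatic
# propagation of the parent config to adispatch_custom_event relies on asyncio
# context-variable support added in 3.11; on older versions the config must be
# passed explicitly, as in test_async_callback_manager.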
|
import sys
import uuid
from typing import Any, Optional
from uuid import UUID
import pytest
from langchain_core.callbacks import AsyncCallbackHandler, BaseCallbackHandler
from langchain_core.callbacks.manager import (
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.config import RunnableConfig
class AsyncCustomCallbackHandler(AsyncCallbackHandler):
def __init__(self) -> None:
self.events: list[Any] = []
async def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> None:
assert kwargs == {}
self.events.append(
(
name,
data,
run_id,
tags,
metadata,
)
)
def test_custom_event_root_dispatch() -> None:
"""Test adhoc event in a nested chain."""
# This just tests that nothing breaks on the path.
# It shouldn't do anything at the moment, since the tracer isn't configured
# to handle adhoc events.
# Expected behavior is that the event cannot be dispatched
with pytest.raises(RuntimeError):
dispatch_custom_event("event1", {"x": 1})
async def test_async_custom_event_root_dispatch() -> None:
"""Test adhoc event in a nested chain."""
# This just tests that nothing breaks on the path.
# It shouldn't do anything at the moment, since the tracer isn't configured
# to handle adhoc events.
# Expected behavior is that the event cannot be dispatched
with pytest.raises(RuntimeError):
await adispatch_custom_event("event1", {"x": 1})
IS_GTE_3_11 = sys.version_info >= (3, 11)
@pytest.mark.skipif(not IS_GTE_3_11, reason="Requires Python >=3.11")
async def test_async_custom_event_implicit_config() -> None:
"""Test dispatch without passing config explicitly."""
callback = AsyncCustomCallbackHandler()
run_id = uuid.UUID(int=7)
# Typing not working well with RunnableLambda when used as
# a decorator for async functions
@RunnableLambda # type: ignore[arg-type]
async def foo(x: int, config: RunnableConfig) -> int:
await adispatch_custom_event("event1", {"x": x})
await adispatch_custom_event("event2", {"x": x})
return x
await foo.ainvoke(
1, # type: ignore[arg-type]
{"callbacks": [callback], "run_id": run_id},
)
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
async def test_async_callback_manager() -> None:
"""Test async callback manager."""
callback = AsyncCustomCallbackHandler()
run_id = uuid.UUID(int=7)
# Typing not working well with RunnableLambda when used as
# a decorator for async functions
@RunnableLambda # type: ignore[arg-type]
async def foo(x: int, config: RunnableConfig) -> int:
await adispatch_custom_event("event1", {"x": x}, config=config)
await adispatch_custom_event("event2", {"x": x}, config=config)
return x
await foo.ainvoke(
1, # type: ignore[arg-type]
{"callbacks": [callback], "run_id": run_id},
)
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
def test_sync_callback_manager() -> None:
"""Test async callback manager."""
class CustomCallbackManager(BaseCallbackHandler):
def __init__(self) -> None:
self.events: list[Any] = []
def on_custom_event(
self,
name: str,
data: Any,
*,
run_id: UUID,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> None:
assert kwargs == {}
self.events.append(
(
name,
data,
run_id,
tags,
metadata,
)
)
callback = CustomCallbackManager()
run_id = uuid.UUID(int=7)
@RunnableLambda
def foo(x: int, config: RunnableConfig) -> int:
dispatch_custom_event("event1", {"x": x})
dispatch_custom_event("event2", {"x": x}, config=config)
return x
foo.invoke(1, {"callbacks": [callback], "run_id": run_id})
assert callback.events == [
("event1", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
("event2", {"x": 1}, UUID("00000000-0000-0000-0000-000000000007"), [], {}),
]
|
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"postgres",
"-d",
"postgres",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
"postgres-test",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
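# Note (assumption): test() reads like a console-script entry point, so any extra
# CLI arguments are forwarded to pytest via sys.argv[1:] after the database is up.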
|
import subprocess
import sys
import time
def wait_for_postgres(max_retries=5, delay=5):
for _ in range(max_retries):
try:
result = subprocess.run(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"exec",
"postgres-test",
"pg_isready",
"-U",
"agpt_user",
"-d",
"agpt_local",
],
check=True,
capture_output=True,
text=True,
)
if "accepting connections" in result.stdout:
print("PostgreSQL is ready.")
return True
except subprocess.CalledProcessError:
print(f"PostgreSQL is not ready yet. Retrying in {delay} seconds...")
time.sleep(delay)
print("Failed to connect to PostgreSQL.")
return False
def run_command(command, check=True):
try:
subprocess.run(command, check=check)
except subprocess.CalledProcessError as e:
print(f"Command failed: {e}")
sys.exit(1)
def test():
# Start PostgreSQL with Docker Compose
run_command(
[
"docker",
"compose",
"-f",
"docker-compose.test.yaml",
"up",
"-d",
"postgres-test",
]
)
if not wait_for_postgres():
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(1)
# Run Prisma migrations
run_command(["prisma", "migrate", "dev"])
# Run the tests
result = subprocess.run(["pytest"] + sys.argv[1:], check=False)
run_command(["docker", "compose", "-f", "docker-compose.test.yaml", "down"])
sys.exit(result.returncode)
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatHuggingFace
@property
def chat_model_params(self) -> dict:
llm = HuggingFaceEndpoint( # type: ignore[call-arg]
repo_id="Qwen/Qwen2.5-72B-Instruct",
task="conversational",
provider="fireworks-ai",
temperature=0,
)
return {"llm": llm}
@pytest.fixture
def model(self) -> BaseChatModel:
return self.chat_model_class(**self.chat_model_params) # type: ignore[call-arg]
@pytest.mark.xfail(
reason=("Overrding, testing only typed dict and json schema structured output")
)
@pytest.mark.parametrize("schema_type", ["typeddict", "json_schema"])
def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(
reason=("Overrding, testing only typed dict and json schema structured output")
)
@pytest.mark.parametrize("schema_type", ["typeddict", "json_schema"])
async def test_structured_output_async(
self, model: BaseChatModel, schema_type: str
) -> None: # type: ignore[override]
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(reason=("Pydantic structured output is not supported"))
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)
@pytest.mark.xfail(reason=("Pydantic structured output is not supported"))
def test_structured_output_optional_param(self, model: BaseChatModel) -> None:
super().test_structured_output_optional_param(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(
model, my_adder_tool=my_adder_tool
)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_few_shot_examples(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_structured_few_shot_examples(model, my_adder_tool=my_adder_tool)
@property
def has_tool_choice(self) -> bool:
return False
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
class TestHuggingFaceEndpoint(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatHuggingFace
@property
def chat_model_params(self) -> dict:
return {}
@pytest.fixture
def model(self) -> BaseChatModel:
llm = HuggingFaceEndpoint( # type: ignore[call-arg]
repo_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
max_new_tokens=512,
do_sample=False,
repetition_penalty=1.03,
)
return self.chat_model_class(llm=llm) # type: ignore[call-arg]
@pytest.mark.xfail(reason=("Not implemented"))
def test_stream(self, model: BaseChatModel) -> None:
super().test_stream(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_astream(self, model: BaseChatModel) -> None:
await super().test_astream(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata(self, model: BaseChatModel) -> None:
super().test_usage_metadata(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_usage_metadata_streaming(self, model: BaseChatModel) -> None:
super().test_usage_metadata_streaming(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling(self, model: BaseChatModel) -> None:
super().test_tool_calling(model)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
await super().test_tool_calling_async(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
super().test_tool_calling_with_no_arguments(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_bind_runnables_as_tools(self, model: BaseChatModel) -> None:
super().test_bind_runnables_as_tools(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output(self, model: BaseChatModel, schema_type: str) -> None:
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
async def test_structured_output_async(
self, model: BaseChatModel, schema_type: str
) -> None: # type: ignore[override]
super().test_structured_output(model, schema_type)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_pydantic_2_v1(self, model: BaseChatModel) -> None:
super().test_structured_output_pydantic_2_v1(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_output_optional_param(self, model: BaseChatModel) -> None:
super().test_structured_output_optional_param(model)
@pytest.mark.xfail(reason=("Not implemented"))
def test_tool_message_histories_list_content(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_tool_message_histories_list_content(
model, my_adder_tool=my_adder_tool
)
@pytest.mark.xfail(reason=("Not implemented"))
def test_structured_few_shot_examples(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_structured_few_shot_examples(model, my_adder_tool=my_adder_tool)
|
from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayWeaviate``"""
def _getitem(self, wid: str) -> 'Document':
"""Helper method for getting item with weaviate as storage
:param wid: weaviate id
:raises KeyError: raise error when weaviate id does not exist in storage
:return: Document
"""
try:
resp = self._client.data_object.get_by_id(
wid, with_vector=True, class_name=self._class_name
)
return Document.from_base64(
resp['properties']['_serialized'], **self._serialize_config
)
except Exception as ex:
raise KeyError(wid) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from weaviate
"""
return self._getitem(self._map_id(_id))
def _set_doc_by_id(self, _id: str, value: 'Document', flush: bool = True):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
payload = self._doc2weaviate_create_payload(value)
self._client.batch.add_data_object(**payload)
if flush:
self._client.batch.flush()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc, flush=False)
self._client.batch.flush()
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._client.data_object.exists(
self._map_id(_id), class_name=self._class_name
):
self._client.data_object.delete(
self._map_id(_id), class_name=self._class_name
)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
if self._class_name:
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
self._load_or_create_weaviate_schema()
def _load_offset2ids(self):
if self._list_like:
ids, self._offset2ids_wid = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
|
from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayWeaviate``"""
def _getitem(self, wid: str) -> 'Document':
"""Helper method for getting item with weaviate as storage
:param wid: weaviate id
:raises KeyError: raise error when weaviate id does not exist in storage
:return: Document
"""
try:
resp = self._client.data_object.get_by_id(
wid, with_vector=True, class_name=self._class_name
)
return Document.from_base64(
resp['properties']['_serialized'], **self._serialize_config
)
except Exception as ex:
raise KeyError(wid) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from weaviate
"""
return self._getitem(self._map_id(_id))
def _set_doc_by_id(self, _id: str, value: 'Document', flush: bool = True):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
payload = self._doc2weaviate_create_payload(value)
self._client.batch.add_data_object(**payload)
if flush:
self._client.batch.flush()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc, flush=False)
self._client.batch.flush()
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._client.data_object.exists(
self._map_id(_id), class_name=self._class_name
):
self._client.data_object.delete(
self._map_id(_id), class_name=self._class_name
)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
if self._class_name:
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
self._load_or_create_weaviate_schema()
def _load_offset2ids(self):
ids, self._offset2ids_wid = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
local_files_only (`bool`, defaults to `False`):
If `True`, only use locally cached files and do not attempt any download.
proxies (`dict`, *optional*):
Proxy configuration to pass to the HTTP requests.
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
If `True` and the path points to a zip or tar file,
extract the compressed file in a folder alongside the archive.
force_extract (`bool`, defaults to `False`):
If `True`, when `extract_compressed_file` is `True` and the archive
was already extracted, re-extract the archive and overwrite the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
download_desc (`str`, *optional*):
A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
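# A minimal usage sketch (not part of the original module; the cache path is illustrative):
if __name__ == "__main__":
    config = DownloadConfig(cache_dir="/tmp/hf_cache", max_retries=3)
    derived = config.copy()  # deep copy: mutating the copy leaves the original intact
    derived.force_download = True
    assert config.force_download is False and derived.max_retries == 3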
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (:obj:`str` or :obj:`Path`, optional): Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (:obj:`bool`, default ``False``): If True, re-download the file even if it's already cached in
the cache dir.
resume_download (:obj:`bool`, default ``False``): If True, resume the download if an incompletely received file is
found.
proxies (:obj:`dict`, optional):
user_agent (:obj:`str`, optional): Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (:obj:`bool`, default ``False``): If True and the path points to a zip or tar file,
extract the compressed file in a folder alongside the archive.
force_extract (:obj:`bool`, default ``False``): If True, when extract_compressed_file is True and the archive
was already extracted, re-extract the archive and overwrite the folder where it was extracted.
delete_extracted (:obj:`bool`, default ``False``): Whether to delete (or keep) the extracted files.
use_etag (:obj:`bool`, default ``True``): Whether to use the ETag HTTP response header to validate the cached files.
num_proc (:obj:`int`, optional): The number of processes to launch to download the files in parallel.
max_retries (:obj:`int`, default ``1``): The number of times to retry an HTTP request if it fails.
use_auth_token (:obj:`str` or :obj:`bool`, optional): Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If True, will get token from ~/.huggingface.
ignore_url_params (:obj:`bool`, default ``False``): Whether to strip all query parameters and #fragments from
the download URL before using it for caching the file.
download_desc (:obj:`str`, optional): A description to be displayed alongside with the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
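# A hedged sketch of how these root registries are used downstream (``ToyModel``
# is illustrative, not part of MMEngine):
#
#     import torch.nn as nn
#
#     @MODELS.register_module()
#     class ToyModel(nn.Module):
#         def forward(self, x):
#             return x
#
#     model = MODELS.build(dict(type='ToyModel'))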
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
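# A small numeric illustration of the reduction/avg_factor semantics (values are
# illustrative; the mean + avg_factor path divides by ``avg_factor + eps``, so the
# result matches the plain ratio up to a negligible epsilon):
#
#     loss = torch.tensor([1.0, 2.0, 3.0])
#     weight_reduce_loss(loss, reduction='mean')                 # tensor(2.)
#     weight_reduce_loss(loss, reduction='mean', avg_factor=2.)  # ~tensor(3.)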
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; setting this here seems to have no effect, so it must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
parallel-executing plot generators against the Ubuntu default ``ulimit -n 1024`` or the
OS X El Capitan default of 256; this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
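# Illustrative behavior (POSIX only; actual values depend on the host limits):
#
#     soft, hard = _set_nofile(8192)  # e.g. (8192, 8192) when the hard limit allows
#
# On Windows, where the ``resource`` module is unavailable, it returns (None, None).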
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; setting this here seems to have no effect, so it must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.23.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
parallel-executing plot generators against the Ubuntu default ``ulimit -n 1024`` or the
OS X El Capitan default of 256; this is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from docarray.proto import DocProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
@pytest.mark.proto
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocProto(data=data)
|
import numpy as np
import pytest
from docarray.proto import DocProto, NodeProto
from docarray.typing import NdArray
@pytest.mark.proto
def test_ndarray():
original_ndarray = np.zeros((3, 224, 224))
custom_ndarray = NdArray._docarray_from_native(original_ndarray)
tensor = NdArray.from_protobuf(custom_ndarray.to_protobuf())
assert (tensor == original_ndarray).all()
@pytest.mark.proto
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
ndarray = NdArray._docarray_from_native(np.zeros((3, 224, 224)))
nd_proto = ndarray.to_protobuf()
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocProto(data=data)
|
"""
Example of training with Dask on GPU
====================================
"""
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like a normal DMatrix and works as a proxy for local DMatrix
# objects scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; note that it cannot be
# used for anything other than training unless a reference is specified. See the
# `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=10000)
y = da.random.random(size=(m,), chunks=10000)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
|
"""
Example of training with Dask on GPU
====================================
"""
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like a normal DMatrix and works as a proxy for local DMatrix
# objects scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = xgb.dask.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`; note that it cannot be
# used for anything other than training unless a reference is specified. See the
# `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = xgb.dask.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = xgb.dask.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=10000)
y = da.random.random(size=(m,), chunks=10000)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-matryoshka')`."
)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
T = TypeVar('T', bound='AudioJaxArray')
@_register_proto(proto_type_name='audio_jaxarray')
class AudioJaxArray(AbstractAudioTensor, JaxArray, metaclass=metaJax):
...
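# A hedged construction sketch (requires ``jax``; the silent one-second waveform at
# 16 kHz is illustrative):
#
#     import jax.numpy as jnp
#
#     audio = AudioJaxArray._docarray_from_native(jnp.zeros(16000))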
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.jaxarray import JaxArray, metaJax
T = TypeVar('T', bound='AudioJaxArray')
@_register_proto(proto_type_name='audio_jaxarray')
class AudioJaxArray(AbstractAudioTensor, JaxArray, metaclass=metaJax):
...
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from .folder import default_loader, find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ image classification dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the Imagenette dataset.
split (string, optional): The dataset split. Supports ``"train"`` (default), and ``"val"``.
size (string, optional): The image size. Supports ``"full"`` (default), ``"320px"``, and ``"160px"``.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image or torch.Tensor, depending on the given loader,
and returns a transformed version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
Attributes:
classes (list): List of the class name tuples.
class_to_idx (dict): Dict with items (class name, class index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (WordNet ID, class index).
"""
_ARCHIVES = {
"full": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz", "fe2fc210e6bb7c5664d602c3cd71e612"),
"320px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", "3df6f0d01a2c9592104656642f5e78a3"),
"160px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz", "e793b78cc4c9e9a4ccc0c1155377a412"),
}
_WNID_TO_CLASS = {
"n01440764": ("tench", "Tinca tinca"),
"n02102040": ("English springer", "English springer spaniel"),
"n02979186": ("cassette player",),
"n03000684": ("chain saw", "chainsaw"),
"n03028079": ("church", "church building"),
"n03394916": ("French horn", "horn"),
"n03417042": ("garbage truck", "dustcart"),
"n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
"n03445777": ("golf ball",),
"n03888257": ("parachute", "chute"),
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
size: str = "full",
download: bool = False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ["train", "val"])
self._size = verify_str_arg(size, "size", ["full", "320px", "160px"])
self._url, self._md5 = self._ARCHIVES[self._size]
self._size_root = Path(self.root) / Path(self._url).stem
self._image_root = str(self._size_root / self._split)
if download:
self._download()
elif not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
self.wnids, self.wnid_to_idx = find_classes(self._image_root)
self.classes = [self._WNID_TO_CLASS[wnid] for wnid in self.wnids]
self.class_to_idx = {
class_name: idx for wnid, idx in self.wnid_to_idx.items() for class_name in self._WNID_TO_CLASS[wnid]
}
self._samples = make_dataset(self._image_root, self.wnid_to_idx, extensions=".jpeg")
self.loader = loader
def _check_exists(self) -> bool:
return self._size_root.exists()
def _download(self):
if self._check_exists():
return
download_and_extract_archive(self._url, self.root, md5=self._md5)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
path, label = self._samples[idx]
image = self.loader(path)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
label = self.target_transform(label)
return image, label
def __len__(self) -> int:
return len(self._samples)
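# A hedged usage sketch (the root path is illustrative; ``download=True`` needs
# network access on the first run):
#
#     dataset = Imagenette(root="data", split="val", size="160px", download=True)
#     image, label = dataset[0]
#     print(len(dataset), dataset.classes[label])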
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .folder import find_classes, make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Imagenette(VisionDataset):
"""`Imagenette <https://github.com/fastai/imagenette#imagenette-1>`_ image classification dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the Imagenette dataset.
split (string, optional): The dataset split. Supports ``"train"`` (default), and ``"val"``.
size (string, optional): The image size. Supports ``"full"`` (default), ``"320px"``, and ``"160px"``.
download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already
downloaded archives are not downloaded again.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
Attributes:
classes (list): List of the class name tuples.
class_to_idx (dict): Dict with items (class name, class index).
wnids (list): List of the WordNet IDs.
wnid_to_idx (dict): Dict with items (WordNet ID, class index).
"""
_ARCHIVES = {
"full": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz", "fe2fc210e6bb7c5664d602c3cd71e612"),
"320px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz", "3df6f0d01a2c9592104656642f5e78a3"),
"160px": ("https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz", "e793b78cc4c9e9a4ccc0c1155377a412"),
}
_WNID_TO_CLASS = {
"n01440764": ("tench", "Tinca tinca"),
"n02102040": ("English springer", "English springer spaniel"),
"n02979186": ("cassette player",),
"n03000684": ("chain saw", "chainsaw"),
"n03028079": ("church", "church building"),
"n03394916": ("French horn", "horn"),
"n03417042": ("garbage truck", "dustcart"),
"n03425413": ("gas pump", "gasoline pump", "petrol pump", "island dispenser"),
"n03445777": ("golf ball",),
"n03888257": ("parachute", "chute"),
}
def __init__(
self,
root: Union[str, Path],
split: str = "train",
size: str = "full",
download: bool = False,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ["train", "val"])
self._size = verify_str_arg(size, "size", ["full", "320px", "160px"])
self._url, self._md5 = self._ARCHIVES[self._size]
self._size_root = Path(self.root) / Path(self._url).stem
self._image_root = str(self._size_root / self._split)
if download:
self._download()
elif not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it.")
self.wnids, self.wnid_to_idx = find_classes(self._image_root)
self.classes = [self._WNID_TO_CLASS[wnid] for wnid in self.wnids]
self.class_to_idx = {
class_name: idx for wnid, idx in self.wnid_to_idx.items() for class_name in self._WNID_TO_CLASS[wnid]
}
self._samples = make_dataset(self._image_root, self.wnid_to_idx, extensions=".jpeg")
def _check_exists(self) -> bool:
return self._size_root.exists()
def _download(self):
if self._check_exists():
return
download_and_extract_archive(self._url, self.root, md5=self._md5)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
path, label = self._samples[idx]
image = Image.open(path).convert("RGB")
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
label = self.target_transform(label)
return image, label
def __len__(self) -> int:
return len(self._samples)
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
cleaned_req = req.split(" ")[0]
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
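# Illustrative call (the output depends entirely on the installed environment):
#
#     _get_sub_deps(["langchain-core"])  # e.g. ['PyYAML', 'jsonpatch', ...]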
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg, *list(all_packages)]
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
"""**sys_info** prints information about the system and langchain packages for debugging purposes.""" # noqa: E501
from collections.abc import Sequence
def _get_sub_deps(packages: Sequence[str]) -> list[str]:
"""Get any specified sub-dependencies."""
from importlib import metadata
sub_deps = set()
_underscored_packages = {pkg.replace("-", "_") for pkg in packages}
for pkg in packages:
try:
required = metadata.requires(pkg)
except metadata.PackageNotFoundError:
continue
if not required:
continue
for req in required:
cleaned_req = req.split(" ")[0]
if cleaned_req.replace("-", "_") not in _underscored_packages:
sub_deps.add(cleaned_req)
return sorted(sub_deps, key=lambda x: x.lower())
def print_sys_info(*, additional_pkgs: Sequence[str] = ()) -> None:
"""Print information about the environment for debugging purposes.
Args:
additional_pkgs: Additional packages to include in the output.
"""
import pkgutil
import platform
import sys
from importlib import metadata, util
# Packages that do not start with "langchain" prefix.
other_langchain_packages = [
"langserve",
"langsmith",
]
langchain_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langchain")
]
langgraph_pkgs = [
name for _, name, _ in pkgutil.iter_modules() if name.startswith("langgraph")
]
all_packages = sorted(
set(
langchain_pkgs
+ langgraph_pkgs
+ other_langchain_packages
+ list(additional_pkgs)
)
)
# Always surface these packages to the top
order_by = ["langchain_core", "langchain", "langchain_community", "langsmith"]
for pkg in reversed(order_by):
if pkg in all_packages:
all_packages.remove(pkg)
all_packages = [pkg] + list(all_packages)
system_info = {
"OS": platform.system(),
"OS Version": platform.version(),
"Python Version": sys.version,
}
print() # noqa: T201
print("System Information") # noqa: T201
print("------------------") # noqa: T201
print("> OS: ", system_info["OS"]) # noqa: T201
print("> OS Version: ", system_info["OS Version"]) # noqa: T201
print("> Python Version: ", system_info["Python Version"]) # noqa: T201
# Print out only langchain packages
print() # noqa: T201
print("Package Information") # noqa: T201
print("-------------------") # noqa: T201
not_installed = []
for pkg in all_packages:
try:
found_package = util.find_spec(pkg)
except Exception:
found_package = None
if found_package is None:
not_installed.append(pkg)
continue
# Package version
try:
package_version = metadata.version(pkg)
except Exception:
package_version = None
# Print package with version
if package_version is not None:
print(f"> {pkg}: {package_version}") # noqa: T201
else:
print(f"> {pkg}: Installed. No version info available.") # noqa: T201
if not_installed:
print() # noqa: T201
print("Optional packages not installed") # noqa: T201
print("-------------------------------") # noqa: T201
for pkg in not_installed:
print(f"> {pkg}") # noqa: T201
sub_dependencies = _get_sub_deps(all_packages)
if sub_dependencies:
print() # noqa: T201
print("Other Dependencies") # noqa: T201
print("------------------") # noqa: T201
for dep in sub_dependencies:
try:
dep_version = metadata.version(dep)
print(f"> {dep}: {dep_version}") # noqa: T201
except Exception:
print(f"> {dep}: Installed. No version info available.") # noqa: T201
if __name__ == "__main__":
print_sys_info()
|
from typing import Any
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.integration_tests import RetrieversIntegrationTests
class ParrotRetriever(BaseRetriever):
parrot_name: str
k: int = 3
def _get_relevant_documents(self, query: str, **kwargs: Any) -> list[Document]:
k = kwargs.get("k", self.k)
return [Document(page_content=f"{self.parrot_name} says: {query}")] * k
class TestParrotRetrieverIntegration(RetrieversIntegrationTests):
@property
def retriever_constructor(self) -> type[ParrotRetriever]:
return ParrotRetriever
@property
def retriever_constructor_params(self) -> dict:
return {"parrot_name": "Polly"}
@property
def retriever_query_example(self) -> str:
return "parrot"
|
from typing import Any, Type
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_tests.integration_tests import RetrieversIntegrationTests
class ParrotRetriever(BaseRetriever):
parrot_name: str
k: int = 3
def _get_relevant_documents(self, query: str, **kwargs: Any) -> list[Document]:
k = kwargs.get("k", self.k)
return [Document(page_content=f"{self.parrot_name} says: {query}")] * k
class TestParrotRetrieverIntegration(RetrieversIntegrationTests):
@property
def retriever_constructor(self) -> Type[ParrotRetriever]:
return ParrotRetriever
@property
def retriever_constructor_params(self) -> dict:
return {"parrot_name": "Polly"}
@property
def retriever_query_example(self) -> str:
return "parrot"
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert d.matches[0].scores['l2'].value >= d.matches[1].scores['l2'].value
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_train_and_index(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
train_data_file = os.path.join(os.environ['TEST_WORKSPACE'], 'train.npy')
train_data = np.array(np.random.random([1024, 10]), dtype=np.float32)
np.save(train_data_file, train_data)
trained_index_file = os.path.join(os.environ['TEST_WORKSPACE'], 'faiss.index')
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
},
)
with f:
# the trained index will be dumped to "faiss.index"
f.post(on='/train', parameters={'train_data_file': train_data_file})
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'IVF10_HNSW32,PQ2',
'trained_index_file': trained_index_file,
'dump_path': dump_path,
},
)
with f:
result = f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert d.matches[0].scores['l2'].value >= d.matches[1].scores['l2'].value
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import AllegroTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class AllegroTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = AllegroTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim // 2)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 2, 8, 8)
@property
def output_shape(self):
return (4, 2, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # The product num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"cross_attention_dim": 16,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"caption_channels": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"AllegroTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import AllegroTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class AllegroTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = AllegroTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 16
sequence_length = 16
hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim // 2)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (4, 2, 8, 8)
@property
def output_shape(self):
return (4, 2, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # The product num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"num_layers": 1,
"cross_attention_dim": 16,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"caption_channels": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
from torchaudio.models.decoder import ctc_decoder
if use_lexicon:
lexicon_file = get_asset_path("decoder/lexicon.txt")
kenlm_file = get_asset_path("decoder/kenlm.arpa") if use_lm else None
else:
lexicon_file = None
kenlm_file = get_asset_path("decoder/kenlm_char.arpa") if use_lm else None
if tokens is None:
tokens = get_asset_path("decoder/tokens.txt")
return ctc_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
**kwargs,
)
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS
emissions = torch.rand(B, T, N)
return emissions
@parameterized.expand(
list(
itertools.product(
[get_asset_path("decoder/tokens.txt"), ["-", "|", "f", "o", "b", "a", "r"]],
[True, False],
[True, False],
)
),
)
def test_construct_decoder(self, tokens, use_lm, use_lexicon):
self._get_decoder(tokens=tokens, use_lm=use_lm, use_lexicon=use_lexicon)
@parameterized.expand(
[(True,), (False,)],
)
def test_shape(self, use_lexicon):
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
self.assertEqual(len(results), emissions.shape[0])
@parameterized.expand(
[(True,), (False,)],
)
def test_timesteps_shape(self, use_lexicon):
"""Each token should correspond with a timestep"""
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
for i in range(emissions.shape[0]):
result = results[i][0]
self.assertEqual(result.tokens.shape, result.timesteps.shape)
def test_no_lm_decoder(self):
"""Check that using no LM produces the same result as using an LM with 0 lm_weight"""
kenlm_decoder = self._get_decoder(lm_weight=0)
zerolm_decoder = self._get_decoder(use_lm=False)
emissions = self._get_emissions()
kenlm_results = kenlm_decoder(emissions)
zerolm_results = zerolm_decoder(emissions)
self.assertEqual(kenlm_results, zerolm_results)
def test_get_timesteps(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3])
decoder = self._get_decoder()
timesteps = decoder._get_timesteps(unprocessed_tokens)
expected = [0, 3, 7]
self.assertEqual(timesteps, expected)
def test_get_tokens_and_idxs(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3]) # ["f", "f", "-", "o", "o", "o", "-", "o"]
decoder = self._get_decoder()
token_ids = decoder._get_tokens(unprocessed_tokens)
tokens = decoder.idxs_to_tokens(token_ids)
expected_ids = [2, 3, 3]
self.assertEqual(token_ids, expected_ids)
expected_tokens = ["f", "o", "o"]
self.assertEqual(tokens, expected_tokens)
@parameterized.expand([(get_asset_path("decoder/tokens.txt"),), (["-", "|", "f", "o", "b", "a", "r"],)])
def test_index_to_tokens(self, tokens):
# decoder tokens: '-' '|' 'f' 'o' 'b' 'a' 'r'
decoder = self._get_decoder(tokens)
idxs = torch.LongTensor((1, 2, 1, 3, 5))
tokens = decoder.idxs_to_tokens(idxs)
expected_tokens = ["|", "f", "|", "o", "a"]
self.assertEqual(tokens, expected_tokens)
|
import itertools
import torch
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoCtcDecoder,
TempDirMixin,
TorchaudioTestCase,
)
NUM_TOKENS = 8
@skipIfNoCtcDecoder
class CTCDecoderTest(TempDirMixin, TorchaudioTestCase):
def _get_decoder(self, tokens=None, use_lm=True, use_lexicon=True, **kwargs):
from torchaudio.models.decoder import ctc_decoder
if use_lexicon:
lexicon_file = get_asset_path("decoder/lexicon.txt")
kenlm_file = get_asset_path("decoder/kenlm.arpa") if use_lm else None
else:
lexicon_file = None
kenlm_file = get_asset_path("decoder/kenlm_char.arpa") if use_lm else None
if tokens is None:
tokens = get_asset_path("decoder/tokens.txt")
return ctc_decoder(
lexicon=lexicon_file,
tokens=tokens,
lm=kenlm_file,
**kwargs,
)
def _get_emissions(self):
B, T, N = 4, 15, NUM_TOKENS
torch.manual_seed(0)
emissions = torch.rand(B, T, N)
return emissions
@parameterized.expand(
list(
itertools.product(
[get_asset_path("decoder/tokens.txt"), ["-", "|", "f", "o", "b", "a", "r"]],
[True, False],
[True, False],
)
),
)
def test_construct_decoder(self, tokens, use_lm, use_lexicon):
self._get_decoder(tokens=tokens, use_lm=use_lm, use_lexicon=use_lexicon)
@parameterized.expand(
[(True,), (False,)],
)
def test_shape(self, use_lexicon):
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
self.assertEqual(len(results), emissions.shape[0])
@parameterized.expand(
[(True,), (False,)],
)
def test_timesteps_shape(self, use_lexicon):
"""Each token should correspond with a timestep"""
emissions = self._get_emissions()
decoder = self._get_decoder(use_lexicon=use_lexicon)
results = decoder(emissions)
for i in range(emissions.shape[0]):
result = results[i][0]
self.assertEqual(result.tokens.shape, result.timesteps.shape)
def test_no_lm_decoder(self):
"""Check that using no LM produces the same result as using an LM with 0 lm_weight"""
kenlm_decoder = self._get_decoder(lm_weight=0)
zerolm_decoder = self._get_decoder(use_lm=False)
emissions = self._get_emissions()
kenlm_results = kenlm_decoder(emissions)
zerolm_results = zerolm_decoder(emissions)
self.assertEqual(kenlm_results, zerolm_results)
def test_get_timesteps(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3])
decoder = self._get_decoder()
timesteps = decoder._get_timesteps(unprocessed_tokens)
expected = [0, 3, 7]
self.assertEqual(timesteps, expected)
def test_get_tokens_and_idxs(self):
unprocessed_tokens = torch.tensor([2, 2, 0, 3, 3, 3, 0, 3]) # ["f", "f", "-", "o", "o", "o", "-", "o"]
decoder = self._get_decoder()
token_ids = decoder._get_tokens(unprocessed_tokens)
tokens = decoder.idxs_to_tokens(token_ids)
expected_ids = [2, 3, 3]
self.assertEqual(token_ids, expected_ids)
expected_tokens = ["f", "o", "o"]
self.assertEqual(tokens, expected_tokens)
@parameterized.expand([(get_asset_path("decoder/tokens.txt"),), (["-", "|", "f", "o", "b", "a", "r"],)])
def test_index_to_tokens(self, tokens):
# decoder tokens: '-' '|' 'f' 'o' 'b' 'a' 'r'
decoder = self._get_decoder(tokens)
idxs = torch.LongTensor((1, 2, 1, 3, 5))
tokens = decoder.idxs_to_tokens(idxs)
expected_tokens = ["|", "f", "|", "o", "a"]
self.assertEqual(tokens, expected_tokens)
|
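# Hedged sketch: build a decoder the same way as the tests above and read the
# best hypothesis. It reuses get_asset_path, torch, and NUM_TOKENS from the
# preceding snippet and assumes the decoder test assets are available.
from torchaudio.models.decoder import ctc_decoder
decoder = ctc_decoder(
    lexicon=get_asset_path("decoder/lexicon.txt"),
    tokens=get_asset_path("decoder/tokens.txt"),
    lm=get_asset_path("decoder/kenlm.arpa"),
)
emissions = torch.rand(1, 15, NUM_TOKENS)
best = decoder(emissions)[0][0]  # top hypothesis for the only utterance
print(best.tokens, best.words, best.score)
|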
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
.. warning::
Starting with version 2.8, we are refactoring TorchAudio to transition it
into a maintenance phase. As a result:
- Some APIs are deprecated in 2.8 and will be removed in 2.9.
- The decoding and encoding capabilities of PyTorch for both audio and video
are being consolidated into TorchCodec.
Please see https://github.com/pytorch/audio/issues/3902 for more information.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
from torchaudio._internal.module_utils import dropping_support
@dropping_support
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
        seed (int): seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_seed(seed)
@dropping_support
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_verbosity(verbosity)
@dropping_support
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_buffer_size(buffer_size)
@dropping_support
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
            To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_use_threads(use_threads)
@dropping_support
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(sox_ext.list_effects())
@dropping_support
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_read_formats()
@dropping_support
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_write_formats()
@dropping_support
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return sox_ext.get_buffer_size()
|
"""Module to change the configuration of libsox, which is used by I/O functions like
:py:mod:`~torchaudio.backend.sox_io_backend` and :py:mod:`~torchaudio.sox_effects`.
"""
from typing import Dict, List
import torchaudio
sox_ext = torchaudio._extension.lazy_import_sox_ext()
from torchaudio._internal.module_utils import dropping_support
@dropping_support
def set_seed(seed: int):
"""Set libsox's PRNG
Args:
        seed (int): seed value. Valid range is int32.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_seed(seed)
@dropping_support
def set_verbosity(verbosity: int):
"""Set libsox's verbosity
Args:
verbosity (int): Set verbosity level of libsox.
* ``1`` failure messages
* ``2`` warnings
* ``3`` details of processing
* ``4``-``6`` increasing levels of debug messages
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_verbosity(verbosity)
@dropping_support
def set_buffer_size(buffer_size: int):
"""Set buffer size for sox effect chain
Args:
buffer_size (int): Set the size in bytes of the buffers used for processing audio.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_buffer_size(buffer_size)
@dropping_support
def set_use_threads(use_threads: bool):
"""Set multithread option for sox effect chain
Args:
use_threads (bool): When ``True``, enables ``libsox``'s parallel effects channels processing.
            To use multithreading, the underlying ``libsox`` has to be compiled with OpenMP support.
See Also:
http://sox.sourceforge.net/sox.html
"""
sox_ext.set_use_threads(use_threads)
@dropping_support
def list_effects() -> Dict[str, str]:
"""List the available sox effect names
Returns:
Dict[str, str]: Mapping from ``effect name`` to ``usage``
"""
return dict(sox_ext.list_effects())
@dropping_support
def list_read_formats() -> List[str]:
"""List the supported audio formats for read
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_read_formats()
@dropping_support
def list_write_formats() -> List[str]:
"""List the supported audio formats for write
Returns:
List[str]: List of supported audio formats
"""
return sox_ext.list_write_formats()
@dropping_support
def get_buffer_size() -> int:
"""Get buffer size for sox effect chain
Returns:
int: size in bytes of buffers used for processing audio.
"""
return sox_ext.get_buffer_size()
|
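# Hedged usage sketch for the configuration helpers above. Upstream they live
# under torchaudio.utils.sox_utils; treat the import path as an assumption.
from torchaudio.utils import sox_utils
sox_utils.set_verbosity(1)       # failure messages only
sox_utils.set_buffer_size(8192)  # bytes per processing buffer
assert sox_utils.get_buffer_size() == 8192
print(sorted(sox_utils.list_effects())[:5])
|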
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import (
ID,
AnyUrl,
Embedding,
ImageUrl,
Tensor,
TextUrl,
TorchTensor,
)
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # This if/else chain needs to be refactored; it is too long, and the
            # per-type checks should be delegated to the type level.
content_type_dict = dict(
tensor=Tensor,
torch_tensor=TorchTensor,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
        called when the Document is nested into another Document that needs to be
        converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # This if/else chain needs to be refactored; it is too long, and the
            # per-type checks should be delegated to the type level.
content_type_dict = dict(
tensor=Tensor,
torch_tensor=TorchTensor,
embedding=Embedding,
any_url=AnyUrl,
image_url=ImageUrl,
id=ID,
)
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
        called when the Document is nested into another Document that needs to be
        converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
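# Hedged, self-contained illustration of the dispatch-table pattern that
# from_protobuf uses above (content_type_dict): map a tag to a constructor
# and fail loudly for unknown tags. The tags and constructors are toy
# stand-ins, not part of the docarray API.
_CONSTRUCTORS = {'int': int, 'float': float, 'text': str}
def _parse(tag: str, raw: str):
    try:
        return _CONSTRUCTORS[tag](raw)
    except KeyError:
        raise ValueError(f'type {tag} is not supported for deserialization')
assert _parse('int', '3') == 3
|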
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b4.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
img_norm_cfg=img_norm_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
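# Hedged usage sketch for show_result, following mmdet 2.x conventions; the
# config and checkpoint paths below are placeholders, not shipped files.
from mmdet.apis import inference_detector, init_detector
model = init_detector('cascade_rcnn_config.py', 'cascade_rcnn.pth')
result = inference_detector(model, 'demo.jpg')
vis = model.show_result('demo.jpg', result)  # np.ndarray with boxes drawn
|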
"""Test ZhipuAI Chat Model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.tools import tool
from langchain_community.chat_models.zhipuai import ChatZhipuAI
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test default model call."""
chat = ChatZhipuAI()
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_model() -> None:
"""Test model kwarg works."""
chat = ChatZhipuAI(model="glm-4")
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = ChatZhipuAI()
response = chat.invoke(
[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="How are you doing?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = ChatZhipuAI(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="Who are you?"),
],
stream=True,
config={"callbacks": callback_manager},
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = ChatZhipuAI()
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_tool_call() -> None:
"""Test tool calling by ChatZhipuAI"""
chat = ChatZhipuAI(model="glm-4-long")
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"])
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
|
"""Test ZhipuAI Chat Model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.tools import tool
from langchain_community.chat_models.zhipuai import ChatZhipuAI
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test default model call."""
chat = ChatZhipuAI()
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_model() -> None:
"""Test model kwarg works."""
chat = ChatZhipuAI(model="glm-4")
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = ChatZhipuAI()
response = chat.invoke(
[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="How are you doing?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = ChatZhipuAI(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="Hello."),
AIMessage(content="Hello!"),
HumanMessage(content="Who are you?"),
],
stream=True,
config={"callbacks": callback_manager},
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = ChatZhipuAI()
message = HumanMessage(content="Hi, how are you.")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
@tool
def add(a: int, b: int) -> int:
"""Adds a and b."""
return a + b
@tool
def multiply(a: int, b: int) -> int:
"""Multiplies a and b."""
return a * b
def test_tool_call() -> None:
"""Test tool calling by ChatZhipuAI"""
chat = ChatZhipuAI(model="glm-4-long") # type: ignore[call-arg]
tools = [add, multiply]
chat_with_tools = chat.bind_tools(tools)
query = "What is 3 * 12?"
messages = [HumanMessage(query)]
ai_msg = chat_with_tools.invoke(messages)
assert isinstance(ai_msg, AIMessage)
assert isinstance(ai_msg.tool_calls, list)
assert len(ai_msg.tool_calls) == 1
tool_call = ai_msg.tool_calls[0]
assert "args" in tool_call
messages.append(ai_msg) # type: ignore[arg-type]
for tool_call in ai_msg.tool_calls:
selected_tool = {"add": add, "multiply": multiply}[tool_call["name"].lower()]
tool_output = selected_tool.invoke(tool_call["args"]) # type: ignore[attr-defined]
messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) # type: ignore[arg-type]
response = chat_with_tools.invoke(messages)
assert isinstance(response, AIMessage)
|
import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement this abstract class in-case you want to build a fastapi-based Gateway by
implementing the `app` property. This property should return a fastapi app. The base Gateway will handle starting
a server and serving the application using that server."""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: bool = False,
**kwargs
):
"""Initialize the FastAPIBaseGateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
        these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.uvicorn_kwargs = uvicorn_kwargs or {}
if ssl_keyfile and 'ssl_keyfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile and 'ssl_certfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_certfile'] = ssl_certfile
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
@property
@abstractmethod
def app(self):
        """Get a FastAPI app"""
...
async def setup_server(self):
"""
        Initialize the uvicorn server that serves the FastAPI app
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
# app property will generate a new fastapi app each time called
app = self.app
_install_health_check(app, self.logger)
self.server = UviServer(
config=Config(
app=app,
host=self.host,
port=self.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**self.uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""
Free resources allocated when setting up HTTP server
"""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run HTTP server forever"""
await self.server.serve()
def _install_health_check(app: 'FastAPI', logger):
health_check_exists = False
for route in app.routes:
        if getattr(route, 'path', None) == '/' and 'GET' in (
            getattr(route, 'methods', None) or ()
        ):
health_check_exists = True
logger.warning(
'endpoint GET on "/" is used for health checks, make sure it\'s still accessible'
)
if not health_check_exists:
@app.get('/')
def health_check():
return {}
|
import logging
import os
from abc import abstractmethod
from typing import Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement this abstract class in-case you want to build a fastapi-based Gateway by
implementing the `app` property. This property should return a fastapi app. The base Gateway will handle starting
a server and serving the application using that server."""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: bool = False,
**kwargs
):
"""Initialize the FastAPIBaseGateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
        :param proxy: If set, respect the http_proxy and https_proxy environment variables; otherwise, unset
        these proxy variables before starting. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.uvicorn_kwargs = uvicorn_kwargs or {}
if ssl_keyfile and 'ssl_keyfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile and 'ssl_certfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_certfile'] = ssl_certfile
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
@property
@abstractmethod
def app(self):
        """Get a FastAPI app"""
...
async def setup_server(self):
"""
        Initialize the uvicorn server that serves the FastAPI app
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
self.server = UviServer(
config=Config(
app=self.app,
host=self.host,
port=self.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**self.uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""
Free resources allocated when setting up HTTP server
"""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run HTTP server forever"""
await self.server.serve()
|
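# A minimal sketch of a concrete gateway built on FastAPIBaseGateway, per its
# docstring: implement the `app` property and return a FastAPI app. The /ping
# endpoint is illustrative, not part of the Jina API.
from fastapi import FastAPI
class ExampleGateway(FastAPIBaseGateway):
    @property
    def app(self):
        app = FastAPI(title='example-gateway')
        @app.get('/ping')
        def ping():
            return {'status': 'ok'}
        return app
|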
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import os
from logging import getLogger
from typing import List
from sentencepiece import SentencePieceProcessor
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from sentencepiece import SentencePieceProcessor
from logging import getLogger
from typing import List
import os
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.info(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.info(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
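# Hedged usage sketch for the Tokenizer above; "tokenizer.model" is a
# placeholder path to a SentencePiece model file.
tok = Tokenizer(model_path="tokenizer.model")
ids = tok.encode("hello world", bos=True, eos=False)
assert ids[0] == tok.bos_id  # BOS was prepended, EOS was not appended
print(tok.decode(ids))
|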
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should
        be called when the Document is nested into another Document that needs to
        be converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(text=str(self))
|
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should be called when the Document
        is nested into another Document that needs to be converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(text=str(self))
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Client) -> None:
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n), chunks=(10000, -1))
y = X.sum(axis=1)
    # DaskDMatrix acts like a normal DMatrix and works as a proxy for the
    # local DMatrix pieces scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
print("Error:", da.sqrt((prediction - y) ** 2).mean().compute())
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client: Client) -> None:
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
    # DaskDMatrix acts like a normal DMatrix and works as a proxy for the
    # local DMatrix pieces scattered across the workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
print("Error:", da.sqrt((prediction - y) ** 2).mean().compute())
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
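# Hedged follow-up sketch: dxgb.predict also accepts a plain Dask array, so
# DaskDMatrix is only strictly needed for training. This mirrors main() with
# smaller, illustrative sizes and reuses the imports above.
def predict_from_array(client: Client) -> None:
    rng = da.random.default_rng(1)
    X = rng.normal(size=(1000, 10), chunks=(100, -1))
    y = X.sum(axis=1)
    dtrain = DaskDMatrix(client, X, y)
    output = dxgb.train(
        client, {"tree_method": "hist"}, dtrain, num_boost_round=2
    )
    pred = dxgb.predict(client, output["booster"], X)  # array in, array out
    print(pred.compute()[:5])
|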
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.transforms
return torchvision.transforms
class ClassificationPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter. We may change that in the
# future though, if we change the output type from the dataset.
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms.append(T.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
transforms.append(T.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
transforms.append(T.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
transforms.append(T.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
transforms.append(T.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = T.AutoAugmentPolicy(auto_augment_policy)
transforms.append(T.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
transforms.append(T.PILToTensor())
transforms.extend(
[
T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transforms.append(T.RandomErasing(p=random_erase_prob))
if use_v2:
transforms.append(T.ToPureTensor())
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms += [
T.Resize(resize_size, interpolation=interpolation, antialias=True),
T.CenterCrop(crop_size),
]
if backend == "pil":
transforms.append(T.PILToTensor())
transforms += [
T.ToDtype(torch.float, scale=True) if use_v2 else T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
if use_v2:
transforms.append(T.ToPureTensor())
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
|
import torch
from torchvision.transforms.functional import InterpolationMode
def get_module(use_v2):
# We need a protected import to avoid the V2 warning in case just V1 is used
if use_v2:
import torchvision.transforms.v2
return torchvision.transforms.v2
else:
import torchvision.transforms
return torchvision.transforms
class ClassificationPresetTrain:
    # Note: this transform assumes that the inputs to forward() are always PIL
# images, regardless of the backend parameter. We may change that in the
# future though, if we change the output type from the dataset.
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
auto_augment_policy=None,
ra_magnitude=9,
augmix_severity=3,
random_erase_prob=0.0,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms.append(T.RandomResizedCrop(crop_size, interpolation=interpolation, antialias=True))
if hflip_prob > 0:
transforms.append(T.RandomHorizontalFlip(hflip_prob))
if auto_augment_policy is not None:
if auto_augment_policy == "ra":
transforms.append(T.RandAugment(interpolation=interpolation, magnitude=ra_magnitude))
elif auto_augment_policy == "ta_wide":
transforms.append(T.TrivialAugmentWide(interpolation=interpolation))
elif auto_augment_policy == "augmix":
transforms.append(T.AugMix(interpolation=interpolation, severity=augmix_severity))
else:
aa_policy = T.AutoAugmentPolicy(auto_augment_policy)
transforms.append(T.AutoAugment(policy=aa_policy, interpolation=interpolation))
if backend == "pil":
transforms.append(T.PILToTensor())
transforms.extend(
[
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transforms.append(T.RandomErasing(p=random_erase_prob))
if use_v2:
transforms.append(T.ToPureTensor())
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BILINEAR,
backend="pil",
use_v2=False,
):
T = get_module(use_v2)
transforms = []
backend = backend.lower()
if backend == "tensor":
transforms.append(T.PILToTensor())
elif backend != "pil":
raise ValueError(f"backend can be 'tensor' or 'pil', but got {backend}")
transforms += [
T.Resize(resize_size, interpolation=interpolation, antialias=True),
T.CenterCrop(crop_size),
]
if backend == "pil":
transforms.append(T.PILToTensor())
transforms += [
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
if use_v2:
transforms.append(T.ToPureTensor())
self.transforms = T.Compose(transforms)
def __call__(self, img):
return self.transforms(img)
|
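# Hedged usage sketch: applying the evaluation preset above to a PIL image
# with the default "pil" backend; sizes are illustrative.
from PIL import Image
preset = ClassificationPresetEval(crop_size=224)
img = Image.new("RGB", (320, 320))
out = preset(img)
print(out.shape)  # torch.Size([3, 224, 224])
|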
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarray):
output = torch.from_numpy(image).permute((2, 0, 1)).contiguous()
elif isinstance(image, PIL.Image.Image):
output = pil_to_tensor(image)
else: # isinstance(inpt, torch.Tensor):
output = image
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
from typing import Any, Dict, Tuple, Union
import numpy as np
import PIL.Image
import torch
from torchvision.io.video import read_video
from torchvision.prototype import features
from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer
from torchvision.transforms import functional as _F
@torch.jit.unused
def decode_image_with_pil(encoded_image: torch.Tensor) -> features.Image:
image = torch.as_tensor(np.array(PIL.Image.open(ReadOnlyTensorBuffer(encoded_image)), copy=True))
if image.ndim == 2:
image = image.unsqueeze(2)
return features.Image(image.permute(2, 0, 1))
@torch.jit.unused
def decode_video_with_av(encoded_video: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
import unittest.mock
with unittest.mock.patch("torchvision.io.video.os.path.exists", return_value=True):
return read_video(ReadOnlyTensorBuffer(encoded_video)) # type: ignore[arg-type]
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarray):
output = torch.from_numpy(image).permute((2, 0, 1)).contiguous()
elif isinstance(image, PIL.Image.Image):
output = pil_to_tensor(image)
else: # isinstance(inpt, torch.Tensor):
output = image
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
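# Hedged sketch: converting a NumPy HWC array into the CHW Image feature via
# to_image_tensor defined above, reusing this file's imports.
arr = np.zeros((32, 32, 3), dtype=np.uint8)
img = to_image_tensor(arr)
print(type(img).__name__, tuple(img.shape))  # Image (3, 32, 32)
|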
_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
neck=dict(
type='FPN',
in_channels=[32, 64, 160, 384],
out_channels=256,
num_outs=5))
|
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_400mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
neck=dict(
type='FPN',
in_channels=[32, 64, 160, 384],
out_channels=256,
num_outs=5))
|
from jina import DocumentArray, Executor, Flow, requests
def test_needs_docs_map():
class TestMergeDictDocMatrixExecutor(Executor):
@requests()
def foo(self, docs_map, **kwargs):
assert {'exec0', 'exec1'} == set(docs_map.keys())
f = (
Flow()
.add(name='exec0')
.add(name='exec1', replicas=2, shards=2, needs=['gateway'])
.add(
name='exec2',
needs=['exec0', 'exec1'],
uses=TestMergeDictDocMatrixExecutor,
disable_reduce=True,
)
)
with f:
f.post(on='/', inputs=DocumentArray.empty(2))
|
from jina import Flow, Executor, requests, DocumentArray
def test_needs_docs_map():
class TestMergeDictDocMatrixExecutor(Executor):
@requests()
def foo(self, docs_map, **kwargs):
assert {'exec0', 'exec1'} == set(docs_map.keys())
f = Flow().add(name='exec0'). \
add(name='exec1', replicas=2, shards=2, needs=['gateway']). \
add(name='exec2',
needs=['exec0', 'exec1'],
uses=TestMergeDictDocMatrixExecutor,
disable_reduce=True)
with f:
f.post(on='/', inputs=DocumentArray.empty(2))
|
from __future__ import annotations
import time
import torch
from torch._dynamo import device_interface # noqa: PLC2701 import-private-name
class DeviceProperties:
def __init__(self) -> None:
self.major = 8 # TODO: bypass check for H100 in triton_heuristics.py
self.max_threads_per_multi_processor = 1
self.multi_processor_count = 80
class DeviceInterface(device_interface.DeviceInterface):
class Event(torch.Event):
def __init__(
self,
enable_timing: bool = False,
blocking: bool = False,
interprocess: bool = False,
) -> None:
self.enable_timing = enable_timing
self.recorded_time: int | None = None
def record(self, stream) -> None:
if not self.enable_timing:
return
assert self.recorded_time is None
self.recorded_time = time.perf_counter_ns()
def elapsed_time(self, end_event: DeviceInterface.Event) -> float:
assert self.recorded_time
assert end_event.recorded_time
# convert to ms
return (end_event.recorded_time - self.recorded_time) / 1000000
def wait(self, stream) -> None:
pass
def query(self) -> None:
pass
def synchronize(self) -> None:
pass
class device: # noqa: N801 invalid-class-name # pyright: ignore [reportIncompatibleVariableOverride]
def __init__(self, device) -> None:
self.device = device
class Worker(device_interface.DeviceInterface.Worker):
@staticmethod
def set_device(device: int) -> None:
# No device index for our backend
pass
@staticmethod
def current_device() -> int:
# No device index for our backend
return 0
@staticmethod
def get_device_properties(
device=None,
) -> DeviceProperties:
return DeviceProperties()
@staticmethod
def device_count() -> int:
return 1
@staticmethod
def maybe_exchange_device(device: int) -> int:
assert device == 0, (
f"Only device index 0 is supported, tried to set index to {device}"
)
return 0 # previous device is always 0
@staticmethod
def exchange_device(device: int) -> int:
assert device == 0, (
f"Only device index 0 is supported, tried to set index to {device}"
)
return 0 # previous device is always 0
@staticmethod
def get_raw_stream(device_index: int):
return None
@staticmethod
def synchronize(device) -> None:
pass
# Can be mock patched by @patch decorator.
@staticmethod
def is_available() -> bool:
return True
@staticmethod
def get_compute_capability(device) -> int:
return 0
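# Minimal timing sketch (assumes the Event subclass can be instantiated
# directly, as the mock backend's own tests do): record a start and an end
# event, then read back the elapsed milliseconds.
if __name__ == "__main__":
    start = DeviceInterface.Event(enable_timing=True)
    end = DeviceInterface.Event(enable_timing=True)
    start.record(None)
    time.sleep(0.01)  # stand-in for real device work
    end.record(None)
    print(f"elapsed: {start.elapsed_time(end):.3f} ms")  # roughly 10 ms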
|
from __future__ import annotations
import time
import torch
from torch._dynamo import device_interface # noqa: PLC2701 import-private-name
class DeviceProperties:
def __init__(self) -> None:
self.major = 8 # TODO: bypass check for H100 in triton_heuristics.py
self.max_threads_per_multi_processor = 1
self.multi_processor_count = 80
class DeviceInterface(device_interface.DeviceInterface):
class Event(torch.Event):
def __init__(
self,
enable_timing: bool = False,
blocking: bool = False,
interprocess: bool = False,
) -> None:
self.enable_timing = enable_timing
self.recorded_time: int | None = None
def record(self, stream) -> None:
if not self.enable_timing:
return
assert self.recorded_time is None
self.recorded_time = time.perf_counter_ns()
def elapsed_time(self, end_event: DeviceInterface.Event) -> float:
assert self.recorded_time
assert end_event.recorded_time
# convert to ms
return (end_event.recorded_time - self.recorded_time) / 1000000
def wait(self, stream) -> None:
pass
def query(self) -> None:
pass
def synchronize(self) -> None:
pass
class device: # noqa: N801 invalid-class-name # pyright: ignore [reportIncompatibleVariableOverride]
def __init__(self, device) -> None:
self.device = device
class Worker(device_interface.DeviceInterface.Worker):
@staticmethod
def set_device(device: int) -> None:
# No device index for our backend
pass
@staticmethod
def current_device() -> int:
# No device index for our backend
return 0
@staticmethod
def get_device_properties(
device=None,
) -> DeviceProperties:
return DeviceProperties()
@staticmethod
def device_count() -> int:
return 1
@staticmethod
def maybe_exchange_device(device: int) -> int:
assert (
device == 0
), f"Only device index 0 is supported, tried to set index to {device}"
return 0 # previous device is always 0
@staticmethod
def exchange_device(device: int) -> int:
assert (
device == 0
), f"Only device index 0 is supported, tried to set index to {device}"
return 0 # previous device is always 0
@staticmethod
def get_raw_stream(device_index: int):
return None
@staticmethod
def synchronize(device) -> None:
pass
# Can be mock patched by @patch decorator.
@staticmethod
def is_available() -> bool:
return True
@staticmethod
def get_compute_capability(device) -> int:
return 0
|
"""This file should contain all tests that need access to the internet (apart
from the ones in test_datasets_download.py).
We want to bundle all internet-related tests in one file, so the file can be
cleanly ignored in FB internal test infra.
"""
import os
import pathlib
from urllib.error import URLError
import pytest
import torchvision.datasets.utils as utils
class TestDatasetUtils:
@pytest.mark.parametrize("use_pathlib", (True, False))
def test_download_url(self, tmpdir, use_pathlib):
if use_pathlib:
tmpdir = pathlib.Path(tmpdir)
url = "http://github.com/pytorch/vision/archive/master.zip"
try:
utils.download_url(url, tmpdir)
assert len(os.listdir(tmpdir)) != 0
except URLError:
pytest.skip(f"could not download test file '{url}'")
@pytest.mark.parametrize("use_pathlib", (True, False))
def test_download_url_retry_http(self, tmpdir, use_pathlib):
if use_pathlib:
tmpdir = pathlib.Path(tmpdir)
url = "https://github.com/pytorch/vision/archive/master.zip"
try:
utils.download_url(url, tmpdir)
assert len(os.listdir(tmpdir)) != 0
except URLError:
pytest.skip(f"could not download test file '{url}'")
@pytest.mark.parametrize("use_pathlib", (True, False))
def test_download_url_dont_exist(self, tmpdir, use_pathlib):
if use_pathlib:
tmpdir = pathlib.Path(tmpdir)
url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip"
with pytest.raises(URLError):
utils.download_url(url, tmpdir)
@pytest.mark.parametrize("use_pathlib", (True, False))
def test_download_url_dispatch_download_from_google_drive(self, mocker, tmpdir, use_pathlib):
if use_pathlib:
tmpdir = pathlib.Path(tmpdir)
url = "https://drive.google.com/file/d/1GO-BHUYRuvzr1Gtp2_fqXRsr9TIeYbhV/view"
id = "1GO-BHUYRuvzr1Gtp2_fqXRsr9TIeYbhV"
filename = "filename"
md5 = "md5"
mocked = mocker.patch("torchvision.datasets.utils.download_file_from_google_drive")
utils.download_url(url, tmpdir, filename, md5)
mocked.assert_called_once_with(id, os.path.expanduser(tmpdir), filename, md5)
if __name__ == "__main__":
pytest.main([__file__])
|
"""This file should contain all tests that need access to the internet (apart
from the ones in test_datasets_download.py).
We want to bundle all internet-related tests in one file, so the file can be
cleanly ignored in FB internal test infra.
"""
import os
from urllib.error import URLError
import pytest
import torchvision.datasets.utils as utils
class TestDatasetUtils:
def test_download_url(self, tmpdir):
url = "http://github.com/pytorch/vision/archive/master.zip"
try:
utils.download_url(url, tmpdir)
assert len(os.listdir(tmpdir)) != 0
except URLError:
pytest.skip(f"could not download test file '{url}'")
def test_download_url_retry_http(self, tmpdir):
url = "https://github.com/pytorch/vision/archive/master.zip"
try:
utils.download_url(url, tmpdir)
assert len(os.listdir(tmpdir)) != 0
except URLError:
pytest.skip(f"could not download test file '{url}'")
def test_download_url_dont_exist(self, tmpdir):
url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip"
with pytest.raises(URLError):
utils.download_url(url, tmpdir)
def test_download_url_dispatch_download_from_google_drive(self, mocker, tmpdir):
url = "https://drive.google.com/file/d/1GO-BHUYRuvzr1Gtp2_fqXRsr9TIeYbhV/view"
id = "1GO-BHUYRuvzr1Gtp2_fqXRsr9TIeYbhV"
filename = "filename"
md5 = "md5"
mocked = mocker.patch("torchvision.datasets.utils.download_file_from_google_drive")
utils.download_url(url, tmpdir, filename, md5)
mocked.assert_called_once_with(id, tmpdir, filename, md5)
if __name__ == "__main__":
pytest.main([__file__])
|
"""Arg pack components."""
from typing import Any, Callable, Dict, Optional
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
class ArgPackComponent(QueryComponent):
"""
Arg pack component.
    Packs an arbitrary number of args into a list.
"""
convert_fn: Optional[Callable] = Field(
default=None, description="Function to convert output."
)
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
raise NotImplementedError
def validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs."""
return input
def _validate_component_outputs(self, output: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component outputs."""
# make sure output value is a list
if not isinstance(output["output"], list):
            raise ValueError("Output is not a list.")
return output
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# combine all lists into one
output = []
for v in kwargs.values():
if self.convert_fn is not None:
v = self.convert_fn(v)
if isinstance(v, list):
output.extend(v)
else:
output.append(v)
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
return self._run_component(**kwargs)
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# NOTE: this shouldn't be used
return InputKeys.from_keys(set(), optional_keys=set())
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class KwargPackComponent(QueryComponent):
"""
Kwarg pack component.
    Packs an arbitrary number of kwargs into a dict.
"""
convert_fn: Optional[Callable] = Field(
default=None, description="Function to convert output."
)
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
raise NotImplementedError
def validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs."""
return input
def _validate_component_outputs(self, output: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component outputs."""
# make sure output value is a list
if not isinstance(output["output"], dict):
            raise ValueError("Output is not a dict.")
return output
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
if self.convert_fn is not None:
for k, v in kwargs.items():
kwargs[k] = self.convert_fn(v)
return {"output": kwargs}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
return self._run_component(**kwargs)
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# NOTE: this shouldn't be used
return InputKeys.from_keys(set(), optional_keys=set())
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
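# Minimal behavioral sketch, calling the private `_run_component` helpers
# directly purely for illustration (a real pipeline would invoke the public
# component API):
if __name__ == "__main__":
    arg_pack = ArgPackComponent()
    print(arg_pack._run_component(a=1, b=[2, 3]))  # {'output': [1, 2, 3]}
    kwarg_pack = KwargPackComponent()
    print(kwarg_pack._run_component(a=1, b=2))  # {'output': {'a': 1, 'b': 2}}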
|
"""Arg pack components."""
from typing import Any, Callable, Dict, Optional
from llama_index.core.base.query_pipeline.query import (
InputKeys,
OutputKeys,
QueryComponent,
)
from llama_index.core.bridge.pydantic import Field
class ArgPackComponent(QueryComponent):
"""Arg pack component.
    Packs an arbitrary number of args into a list.
"""
convert_fn: Optional[Callable] = Field(
default=None, description="Function to convert output."
)
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
raise NotImplementedError
def validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs."""
return input
def _validate_component_outputs(self, output: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component outputs."""
# make sure output value is a list
if not isinstance(output["output"], list):
            raise ValueError("Output is not a list.")
return output
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# combine all lists into one
output = []
for v in kwargs.values():
if self.convert_fn is not None:
v = self.convert_fn(v)
if isinstance(v, list):
output.extend(v)
else:
output.append(v)
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
return self._run_component(**kwargs)
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# NOTE: this shouldn't be used
return InputKeys.from_keys(set(), optional_keys=set())
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class KwargPackComponent(QueryComponent):
"""Kwarg pack component.
    Packs an arbitrary number of kwargs into a dict.
"""
convert_fn: Optional[Callable] = Field(
default=None, description="Function to convert output."
)
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
raise NotImplementedError
def validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs."""
return input
def _validate_component_outputs(self, output: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component outputs."""
# make sure output value is a list
if not isinstance(output["output"], dict):
            raise ValueError("Output is not a dict.")
return output
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
if self.convert_fn is not None:
for k, v in kwargs.items():
kwargs[k] = self.convert_fn(v)
return {"output": kwargs}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
return self._run_component(**kwargs)
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# NOTE: this shouldn't be used
return InputKeys.from_keys(set(), optional_keys=set())
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
|
_base_ = 'faster-rcnn_r50_fpg_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
|
_base_ = 'faster-rcnn_r50_fpn_crop640-50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(out_channels=128, inter_channels=128),
rpn_head=dict(in_channels=128),
roi_head=dict(
bbox_roi_extractor=dict(out_channels=128),
bbox_head=dict(in_channels=128)))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class InstructPix2Pix(ExamplesTestsAccelerate):
def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=6
--checkpointing_steps=2
--checkpoints_total_limit=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + test_args)
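            # With max_train_steps=6 and checkpointing_steps=2, checkpoints are
            # written at steps 2, 4 and 6; checkpoints_total_limit=2 prunes the
            # oldest, so only checkpoint-4 and checkpoint-6 should remain.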
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-4", "checkpoint-6"},
)
def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=4
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + test_args)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
resume_run_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=8
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
""".split()
run_command(self._launch_args + resume_run_args)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-6", "checkpoint-8"},
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class InstructPix2Pix(ExamplesTestsAccelerate):
def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=6
--checkpointing_steps=2
--checkpoints_total_limit=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-4", "checkpoint-6"},
)
def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=4
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
""".split()
run_command(self._launch_args + test_args)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4"},
)
resume_run_args = f"""
examples/instruct_pix2pix/train_instruct_pix2pix.py
--pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe
--dataset_name=hf-internal-testing/instructpix2pix-10-samples
--resolution=64
--random_flip
--train_batch_size=1
--max_train_steps=8
--checkpointing_steps=2
--output_dir {tmpdir}
--seed=0
--resume_from_checkpoint=checkpoint-4
--checkpoints_total_limit=2
""".split()
run_command(self._launch_args + resume_run_args)
# check checkpoint directories exist
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-6", "checkpoint-8"},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
from torch import Tensor
def get_uncertainty(mask_pred: Tensor, labels: Tensor) -> Tensor:
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
    prediction in 'mask_pred' for the foreground class in `labels`.
Args:
        mask_pred (Tensor): mask prediction logits, shape (num_rois,
num_classes, mask_height, mask_width).
labels (Tensor): Either predicted or ground truth label for
each predicted mask, of length num_rois.
Returns:
scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score,
shape (num_rois, 1, mask_height, mask_width)
"""
if mask_pred.shape[1] == 1:
gt_class_logits = mask_pred.clone()
else:
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
return -torch.abs(gt_class_logits)
def get_uncertain_point_coords_with_randomness(
mask_pred: Tensor, labels: Tensor, num_points: int,
oversample_ratio: float, importance_sample_ratio: float) -> Tensor:
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
    the 'get_uncertainty()' function that takes a point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (Tensor): The ground truth class for each instance.
num_points (int): The number of points to sample.
oversample_ratio (float): Oversampling parameter.
        importance_sample_ratio (float): Ratio of points that are sampled
            via importance sampling.
    Returns:
        point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
            that contains the coordinates of sampled points.
"""
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = mask_pred.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=mask_pred.device)
point_logits = point_sample(mask_pred, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = get_uncertainty(point_logits, labels)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=mask_pred.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_roi_coords = torch.rand(
batch_size, num_random_points, 2, device=mask_pred.device)
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
return point_coords
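# Minimal shape sketch (random logits, purely illustrative): for 4 ROIs and 80
# classes, 12 points are sampled per ROI -- the most uncertain ones via
# importance sampling plus a random remainder.
if __name__ == "__main__":
    mask_pred = torch.randn(4, 80, 14, 14)
    labels = torch.randint(0, 80, (4, ))
    point_coords = get_uncertain_point_coords_with_randomness(
        mask_pred, labels, num_points=12, oversample_ratio=3.0,
        importance_sample_ratio=0.75)
    # 0.75 * 12 = 9 importance-sampled points + 3 random points
    assert point_coords.shape == (4, 12, 2)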
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
def get_uncertainty(mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
    prediction in 'mask_pred' for the foreground class in `labels`.
Args:
        mask_pred (Tensor): mask prediction logits, shape (num_rois,
num_classes, mask_height, mask_width).
labels (list[Tensor]): Either predicted or ground truth label for
each predicted mask, of length num_rois.
Returns:
scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score,
shape (num_rois, 1, mask_height, mask_width)
"""
if mask_pred.shape[1] == 1:
gt_class_logits = mask_pred.clone()
else:
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
return -torch.abs(gt_class_logits)
def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points,
oversample_ratio,
importance_sample_ratio):
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
    the 'get_uncertainty()' function that takes a point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (list): The ground truth class for each instance.
num_points (int): The number of points to sample.
oversample_ratio (int): Oversampling parameter.
        importance_sample_ratio (float): Ratio of points that are sampled
            via importance sampling.
    Returns:
        point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
            that contains the coordinates of sampled points.
"""
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = mask_pred.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=mask_pred.device)
point_logits = point_sample(mask_pred, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = get_uncertainty(point_logits, labels)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=mask_pred.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_roi_coords = torch.rand(
batch_size, num_random_points, 2, device=mask_pred.device)
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
return point_coords
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing', 'has_batch_norm', 'is_abs'
]
|
from . import assert_when_ready
def test_text_search(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs
query_string = "Python is a valuable skill"
expected_text = docs[0].text
def pred():
docs, scores = simple_index.text_search(
query=query_string, search_field='text', limit=10
)
assert len(docs) == 1
assert docs[0].text == expected_text
assert scores[0] > 0
assert_when_ready(pred)
def test_text_search_batched(simple_index_with_docs): # noqa: F811
index, docs = simple_index_with_docs
queries = ['processing with Python', 'tips', 'for']
def pred():
docs, scores = index.text_search_batched(queries, search_field='text', limit=5)
assert len(docs) == 3
assert len(docs[0]) == 1
assert len(docs[1]) == 1
assert len(docs[2]) == 2
assert len(scores) == 3
assert len(scores[0]) == 1
assert len(scores[1]) == 1
assert len(scores[2]) == 2
assert_when_ready(pred)
|
from . import assert_when_ready
def test_text_search(simple_index_with_docs): # noqa: F811
simple_index, docs = simple_index_with_docs
query_string = "Python is a valuable skill"
expected_text = docs[0].text
def pred():
docs, scores = simple_index.text_search(
query=query_string, search_field='text', limit=1
)
assert len(docs) == 1
assert docs[0].text == expected_text
assert scores[0] > 0
assert_when_ready(pred)
def test_text_search_batched(simple_index_with_docs): # noqa: F811
index, docs = simple_index_with_docs
queries = ['processing with Python', 'tips', 'for']
def pred():
docs, scores = index.text_search_batched(queries, search_field='text', limit=5)
assert len(docs) == 3
assert len(docs[0]) == 1
assert len(docs[1]) == 1
assert len(docs[2]) == 2
assert len(scores) == 3
assert len(scores[0]) == 1
assert len(scores[1]) == 1
assert len(scores[2]) == 2
assert_when_ready(pred)
|
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, StorageReader, StorageWriter]:
if storage:
if checkpoint_id is not None:
storage.reset(checkpoint_id)
return storage
if not checkpoint_id:
        raise RuntimeError(
            "`checkpoint_id` must be specified if "
            "storage_reader/storage_writer is None."
        )
targets: list[type[Union[StorageReader, StorageWriter]]] = []
if reader:
targets = [
FileSystemReader,
]
else:
targets = [
FileSystemWriter,
]
try:
from ._fsspec_filesystem import FsspecReader, FsspecWriter
targets.append(FsspecReader if reader else FsspecWriter)
except Exception:
pass
for target in targets:
if target.validate_checkpoint_id(checkpoint_id):
storage = target(checkpoint_id) # type: ignore[call-arg]
storage.reset(checkpoint_id)
return storage
raise RuntimeError(
"Cannot detect which StorageReader or StorageWriter to use. "
"Please specify the storage_reader/storage_writer."
)
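# Minimal dispatch sketch, assuming the path below is writable: with no
# explicit storage object, a plain filesystem path resolves to a
# FileSystemWriter (or FileSystemReader when reader=True).
if __name__ == "__main__":
    writer = _storage_setup(None, "/tmp/ckpt", reader=False)
    assert isinstance(writer, FileSystemWriter)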
|
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, StorageReader, StorageWriter]:
if storage:
if checkpoint_id is not None:
storage.reset(checkpoint_id)
return storage
if not checkpoint_id:
raise RuntimeError(
"`checkpoint_id` must be specified if "
"storage_reader/storage_writer is None."
)
targets: list[type[Union[StorageReader, StorageWriter]]] = []
if reader:
targets = [
FileSystemReader,
]
else:
targets = [
FileSystemWriter,
]
try:
from ._fsspec_filesystem import FsspecReader, FsspecWriter
targets.append(FsspecReader if reader else FsspecWriter)
except Exception:
pass
for target in targets:
if target.validate_checkpoint_id(checkpoint_id):
storage = target(checkpoint_id) # type: ignore[call-arg]
storage.reset(checkpoint_id)
return storage
raise RuntimeError(
"Cannot detect which StorageReader or StorageWriter to use. "
"Please specify the storage_reader/storage_writer."
)
|
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
func_retry = retry(
reraise=False,
stop=stop_after_attempt(5),
wait=wait_exponential(multiplier=1, min=1, max=30),
)
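# Minimal usage sketch (the resource/action names are illustrative only): a
# flaky callable wrapped with conn_retry is retried with exponential backoff
# until it succeeds or max_retry is exhausted.
if __name__ == "__main__":
    attempts = {"n": 0}

    @conn_retry("Redis", "Acquire connection", max_retry=2)
    def connect() -> str:
        attempts["n"] += 1
        if attempts["n"] < 2:
            raise ConnectionError("not ready yet")
        return "connected"

    print(connect())  # succeeds on the second attempt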
|
import asyncio
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(
resource_name: str,
action_name: str,
max_retry: int = 5,
multiplier: int = 1,
min_wait: float = 1,
max_wait: float = 30,
):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.warning(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
is_coroutine = asyncio.iscoroutinefunction(func)
retry_decorator = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=multiplier, min=min_wait, max=max_wait),
before_sleep=on_retry,
reraise=True,
)
wrapped_func = retry_decorator(func)
@wraps(func)
def sync_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
@wraps(func)
async def async_wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
try:
result = await wrapped_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return async_wrapper if is_coroutine else sync_wrapper
return decorator
|
import unittest
import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler
from .test_schedulers import SchedulerCommonTest
class VQDiffusionSchedulerTest(SchedulerCommonTest):
scheduler_classes = (VQDiffusionScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_vec_classes": 4097,
"num_train_timesteps": 100,
}
config.update(**kwargs)
return config
def dummy_sample(self, num_vec_classes):
batch_size = 4
height = 8
width = 8
sample = torch.randint(0, num_vec_classes, (batch_size, height * width))
return sample
@property
def dummy_sample_deter(self):
assert False
def dummy_model(self, num_vec_classes):
def model(sample, t, *args):
batch_size, num_latent_pixels = sample.shape
logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels))
return_value = F.log_softmax(logits.double(), dim=1).float()
return return_value
return model
def test_timesteps(self):
for timesteps in [2, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_num_vec_classes(self):
for num_vec_classes in [5, 100, 1000, 4000]:
self.check_over_configs(num_vec_classes=num_vec_classes)
def test_time_indices(self):
for t in [0, 50, 99]:
self.check_over_forward(time_step=t)
@unittest.skip("Test not supported.")
def test_add_noise_device(self):
pass
|
import torch
import torch.nn.functional as F
from diffusers import VQDiffusionScheduler
from .test_schedulers import SchedulerCommonTest
class VQDiffusionSchedulerTest(SchedulerCommonTest):
scheduler_classes = (VQDiffusionScheduler,)
def get_scheduler_config(self, **kwargs):
config = {
"num_vec_classes": 4097,
"num_train_timesteps": 100,
}
config.update(**kwargs)
return config
def dummy_sample(self, num_vec_classes):
batch_size = 4
height = 8
width = 8
sample = torch.randint(0, num_vec_classes, (batch_size, height * width))
return sample
@property
def dummy_sample_deter(self):
assert False
def dummy_model(self, num_vec_classes):
def model(sample, t, *args):
batch_size, num_latent_pixels = sample.shape
logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels))
return_value = F.log_softmax(logits.double(), dim=1).float()
return return_value
return model
def test_timesteps(self):
for timesteps in [2, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_num_vec_classes(self):
for num_vec_classes in [5, 100, 1000, 4000]:
self.check_over_configs(num_vec_classes=num_vec_classes)
def test_time_indices(self):
for t in [0, 50, 99]:
self.check_over_forward(time_step=t)
def test_add_noise_device(self):
pass
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
    """Mixing in arguments required by any class that extends :class:`AsyncNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
    arg_group.add_argument(
        '--grpc-server-options',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help="Dictionary of keyword arguments passed to the grpc server as options when starting the server, e.g. {'grpc.max_send_message_length': -1}",
        default=None,
    )
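# Minimal wiring sketch, assuming jina is installed: with no CLI flags the
# option simply defaults to None.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    mixin_base_runtime_parser(parser.add_argument_group('runtime'))
    args = parser.parse_args([])
    print(args.grpc_server_options)  # None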
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.parsers.helper import KVAppendAction
def mixin_base_runtime_parser(arg_group):
    """Mixing in arguments required by any class that extends :class:`AsyncNewLoopRuntime` into the given parser.
:param arg_group: the parser instance to which we add arguments
"""
arg_group.add_argument(
'--port-in',
type=int,
default=helper.random_port(),
dest='port',
help='The port for input data to bind to, default a random port between [49152, 65535]',
)
arg_group.add_argument(
'--host-in',
type=str,
default=__default_host__,
help=f'The host address for binding to, by default it is {__default_host__}',
)
    arg_group.add_argument(
        '--grpc-server-options',
        action=KVAppendAction,
        metavar='KEY: VALUE',
        nargs='*',
        help="Dictionary of keyword arguments passed to the grpc server as options when starting the server, e.g. {'grpc.max_send_message_length': -1}",
        default=None,
    )
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.AutoContrast,
init_kwargs={
"value_range": (20, 200),
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype="float32",
)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype="uint8")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertAllClose(
ops.convert_to_numpy(ys[0]), np.array([[[0.0]], [[1]]])
)
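# Minimal numpy sketch of the rescaling asserted above: each channel is
# stretched so its minimum maps to the low end of value_range and its maximum
# to the high end.
if __name__ == "__main__":
    x = np.array([0.0, 128.0], dtype="float32")
    lo, hi = 0.0, 255.0
    stretched = (x - x.min()) / (x.max() - x.min()) * (hi - lo) + lo
    print(stretched)  # [  0. 255.]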
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import ops
from keras.src import testing
class AutoContrastTest(testing.TestCase, parameterized.TestCase):
@pytest.mark.requires_trainable_backend
def test_layer(self):
self.run_layer_test(
layers.AutoContrast,
init_kwargs={
"value_range": (20, 200),
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
)
def test_constant_channels_dont_get_nanned(self):
img = np.array([1, 1], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
def test_auto_contrast_expands_value_range(self):
img = np.array([0, 128], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_different_values_per_channel(self):
img = np.array(
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
dtype="float32",
)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 0]) == 255.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0, ..., 1]) == 255.0))
self.assertAllClose(
ys,
[
[
[[0.0, 0.0, 0.0], [85.0, 85.0, 85.0]],
[[170.0, 170.0, 170.0], [255.0, 255.0, 255.0]],
]
],
)
def test_auto_contrast_expands_value_range_uint8(self):
img = np.array([0, 128], dtype="uint8")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 255))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 255.0))
def test_auto_contrast_properly_converts_value_range(self):
img = np.array([0, 0.5], dtype="float32")
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=-1)
img = np.expand_dims(img, axis=0)
layer = layers.AutoContrast(value_range=(0, 1))
ys = layer(img)
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 0.0))
self.assertTrue(np.any(ops.convert_to_numpy(ys[0]) == 1.0))
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if-else statement needs to be refactored; it is too long
            # and the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
    def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
        be converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
        return NodeProto(nested=self.to_protobuf())
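# Hypothetical round-trip sketch (the `MyDoc` class and the `Document` import
# are assumptions for illustration, not part of this module):
#
#     from docarray import Document
#
#     class MyDoc(Document):
#         text: str
#
#     doc = MyDoc(text='hello')
#     assert MyDoc.from_protobuf(doc.to_protobuf()).text == 'hello'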
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if-else statement needs to be refactored; it is too long
            # and the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
mesh_url=Mesh3DUrl,
point_cloud_url=PointCloud3DUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
    def _to_node_protobuf(self) -> 'NodeProto':
        """Convert Document into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to
        be converted into a protobuf.
        :return: the nested item protobuf message
        """
        from docarray.proto import NodeProto
        return NodeProto(nested=self.to_protobuf())
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.registry import MODELS
detector = MODELS.build(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
assert isinstance(batch_results, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from mmengine.config import ConfigDict
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestCornerNet(TestCase):
def setUp(self) -> None:
register_all_modules()
model_cfg = get_detector_cfg(
'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
backbone = dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch')
neck = dict(
type='FPN',
in_channels=[512],
out_channels=256,
start_level=0,
add_extra_convs='on_input',
num_outs=1)
model_cfg.backbone = ConfigDict(**backbone)
model_cfg.neck = ConfigDict(**neck)
model_cfg.bbox_head.num_feat_levels = 1
self.model_cfg = model_cfg
def test_init(self):
model = get_detector_cfg(
'cornernet/cornernet_hourglass104_8xb6-210e-mstest_coco.py')
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
self.assertTrue(detector.bbox_head is not None)
self.assertTrue(detector.backbone is not None)
self.assertTrue(not hasattr(detector, 'neck'))
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_loss_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 511, 511], [3, 511, 511]])
data = detector.data_preprocessor(packed_inputs, True)
losses = detector.forward(**data, mode='loss')
assert isinstance(losses, dict)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_predict_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
assert len(batch_results) == 2
assert isinstance(batch_results[0], DetDataSample)
@unittest.skipIf(not torch.cuda.is_available(),
'test requires GPU and torch+cuda')
def test_cornernet_forward_tensor_mode(self):
from mmdet.models import build_detector
detector = build_detector(self.model_cfg)
detector.init_weights()
packed_inputs = demo_mm_inputs(2, [[3, 512, 512], [3, 512, 512]])
data = detector.data_preprocessor(packed_inputs, False)
batch_results = detector.forward(**data, mode='tensor')
assert isinstance(batch_results, tuple)
|
import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
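# Worked example (assumed input): _to_tensor([[1, 2, 3], [7]]) zero-pads the
# shorter sequence, returning values of shape (2, 3) and lengths tensor [3, 1].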
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
from dp.preprocessing.text import Preprocessor, LanguageTokenizer, SequenceTokenizer
    # By default, the "dp" package logs at DEBUG level; raise it to INFO while loading.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
with torch.serialization.safe_globals([Preprocessor, LanguageTokenizer, SequenceTokenizer]):
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
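# Sanity check with bits=8: -1.0 -> 0, 0.0 -> 127 (int() truncates 127.5),
# and 1.0 -> 255, i.e. the full [0, 2**8 - 1] label range.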
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
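# Note: prod(upsample_scales) = 5 * 5 * 11 = 275 equals hop_length below;
# WaveRNN expects the total upsampling factor to match the hop length.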
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2**8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
|
import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
    # By default, the "dp" package logs at DEBUG level; raise it to INFO while loading.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2**8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].dict()['hello'] == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
@pytest.mark.proto
def test_union_type_error():
from typing import Union
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
with pytest.raises(ValueError):
DocList[CustomDoc].from_protobuf(docs.to_protobuf())
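    # A Union of two BaseDoc types is ambiguous when deserializing from
    # protobuf, hence the ValueError above; a Union of primitive types
    # (checked below) round-trips fine.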
class BasisUnion(BaseDoc):
ud: Union[int, str]
docs_basic = DocList[BasisUnion]([BasisUnion(ud="hello")])
docs_copy = DocList[BasisUnion].from_protobuf(docs_basic.to_protobuf())
assert docs_copy == docs_basic
|
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.base_doc import AnyDoc
from docarray.documents import ImageDoc, TextDoc
from docarray.typing import NdArray
@pytest.mark.proto
def test_simple_proto():
class CustomDoc(BaseDoc):
text: str
tensor: NdArray
da = DocList(
[CustomDoc(text='hello', tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
new_da = DocList[CustomDoc].from_protobuf(da.to_protobuf())
for doc1, doc2 in zip(da, new_da):
assert doc1.text == doc2.text
assert (doc1.tensor == doc2.tensor).all()
@pytest.mark.proto
def test_nested_proto():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList[CustomDocument].from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_nested_proto_any_doc():
class CustomDocument(BaseDoc):
text: TextDoc
image: ImageDoc
da = DocList[CustomDocument](
[
CustomDocument(
text=TextDoc(text='hello'),
image=ImageDoc(tensor=np.zeros((3, 224, 224))),
)
for _ in range(10)
]
)
DocList.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_any_doc_list_proto():
doc = AnyDoc(hello='world')
pt = DocList([doc]).to_protobuf()
docs = DocList.from_protobuf(pt)
assert docs[0].dict()['hello'] == 'world'
@pytest.mark.proto
def test_any_nested_doc_list_proto():
from docarray import BaseDoc, DocList
class TextDocWithId(BaseDoc):
id: str
text: str
class ResultTestDoc(BaseDoc):
matches: DocList[TextDocWithId]
index_da = DocList[TextDocWithId](
[TextDocWithId(id=f'{i}', text=f'ID {i}') for i in range(10)]
)
out_da = DocList[ResultTestDoc]([ResultTestDoc(matches=index_da[0:2])])
pb = out_da.to_protobuf()
docs = DocList.from_protobuf(pb)
assert docs[0].matches[0].id == '0'
assert len(docs[0].matches) == 2
assert len(docs) == 1
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
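    # Hedged usage sketch (on a concrete Url3D subclass; hypothetical URL):
    #   mesh = url._load_trimesh_instance(force='mesh')   # a single trimesh.Trimesh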
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, Type, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
                f'{cls.__name__} must have one of the following extensions: '
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
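    # e.g. a URL ending in '.glb' validates, while one ending in '.txt' raises
    # ValueError (hypothetical file names, for illustration only).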
def _load_trimesh_instance(
self: T,
force: Optional[str] = None,
skip_materials: bool = True,
trimesh_args: Optional[Dict[str, Any]] = None,
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
        :param skip_materials: Skip materials if True, else load them.
:param trimesh_args: dictionary of additional arguments for `trimesh.load()`
or `trimesh.load_remote()`.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
if not trimesh_args:
trimesh_args = {}
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
return mesh
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
import pytest
import subprocess
import os
from typing import Generator
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.storage.index_store.gel import (
GelIndexStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except ImportError:
no_packages = True
skip_in_cicd = os.environ.get("CI") is not None
try:
if not skip_in_cicd:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
@pytest.fixture()
def gel_kvstore() -> Generator[GelKVStore, None, None]:
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
keys = kvstore.get_all().keys()
for key in keys:
kvstore.delete(key)
@pytest.fixture()
def gel_indexstore(gel_kvstore: GelKVStore) -> GelIndexStore:
return GelIndexStore(gel_kvstore=gel_kvstore)
@pytest.mark.skipif(
    no_packages or skip_in_cicd, reason="gel not installed or running in CI"
)
def test_gel_index_store(gel_indexstore: GelIndexStore) -> None:
index_struct = IndexGraph()
index_store = gel_indexstore
index_store.add_index_struct(index_struct)
assert index_store.get_index_struct(struct_id=index_struct.index_id) == index_struct
|
import pytest
import subprocess
from typing import Generator
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.storage.index_store.gel import (
GelIndexStore,
)
from llama_index.storage.kvstore.gel import GelKVStore
try:
import gel # noqa
no_packages = False
except ImportError:
no_packages = True
try:
subprocess.run(["gel", "project", "init", "--non-interactive"], check=True)
except subprocess.CalledProcessError as e:
print(e)
@pytest.fixture()
def gel_kvstore() -> Generator[GelKVStore, None, None]:
kvstore = None
try:
kvstore = GelKVStore()
yield kvstore
finally:
if kvstore:
keys = kvstore.get_all().keys()
for key in keys:
kvstore.delete(key)
@pytest.fixture()
def gel_indexstore(gel_kvstore: GelKVStore) -> GelIndexStore:
return GelIndexStore(gel_kvstore=gel_kvstore)
@pytest.mark.skipif(no_packages, reason="gel not installed")
def test_gel_index_store(gel_indexstore: GelIndexStore) -> None:
index_struct = IndexGraph()
index_store = gel_indexstore
index_store.add_index_struct(index_struct)
assert index_store.get_index_struct(struct_id=index_struct.index_id) == index_struct
|
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
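    # The snake_case keys in `config` are expected to surface as camelCase
    # fields in Weaviate's vectorIndexConfig, which the assertions below check.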
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
'dynamic_ef_max': 300,
'dynamic_ef_factor': 4,
'vector_cache_max_objects': 1000000,
'flat_search_cutoff': 20000,
'cleanup_interval_seconds': 1000,
'skip': True,
'distance': 'l2-squared',
},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMin') == 50
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMax') == 300
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfFactor') == 4
assert (
main_class.get('vectorIndexConfig', {}).get('vectorCacheMaxObjects') == 1000000
)
assert main_class.get('vectorIndexConfig', {}).get('flatSearchCutoff') == 20000
assert main_class.get('vectorIndexConfig', {}).get('cleanupIntervalSeconds') == 1000
assert main_class.get('vectorIndexConfig', {}).get('skip') is True
assert main_class.get('vectorIndexConfig', {}).get('distance') == 'l2-squared'
|
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
'dynamic_ef_max': 300,
'dynamic_ef_factor': 4,
'vector_cache_max_objects': 1000000,
'flat_search_cutoff': 20000,
'cleanup_interval_seconds': 1000,
'skip': True,
},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMin') == 50
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMax') == 300
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfFactor') == 4
assert (
main_class.get('vectorIndexConfig', {}).get('vectorCacheMaxObjects') == 1000000
)
assert main_class.get('vectorIndexConfig', {}).get('flatSearchCutoff') == 20000
assert main_class.get('vectorIndexConfig', {}).get('cleanupIntervalSeconds') == 1000
assert main_class.get('vectorIndexConfig', {}).get('skip') is True
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
'BaseVideoDataset',
'MOTChallengeDataset',
'TrackImgSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_det_dataset import BaseDetDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'XMLDataset',
'CocoDataset',
'DeepFashionDataset',
'VOCDataset',
'CityscapesDataset',
'LVISDataset',
'LVISV05Dataset',
'LVISV1Dataset',
'WIDERFaceDataset',
'get_loading_pipeline',
'CocoPanopticDataset',
'MultiImageMixDataset',
'OpenImagesDataset',
'OpenImagesChallengeDataset',
'AspectRatioBatchSampler',
'ClassAwareSampler',
'MultiSourceSampler',
'GroupMultiSourceSampler',
'BaseDetDataset',
'CrowdHumanDataset',
'Objects365V1Dataset',
'Objects365V2Dataset',
'DSDLDetDataset',
]
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTensorFlowTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_tensorflow_tensor')
class VideoTensorFlowTensor(
TensorFlowTensor, VideoTensorMixin, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoTensorFlowTensor, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTensorFlowTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=tf.random.normal((100, 224, 224, 3)),
)
        doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTensorFlowTensor, doc_2.url.load())
        doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTensorFlowTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_tensorflow_tensor')
class VideoTensorFlowTensor(
TensorFlowTensor, VideoTensorMixin, metaclass=metaTensorFlow
):
"""
Subclass of TensorFlowTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import tensorflow as tf
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoTensorFlowTensor, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTensorFlowTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=tf.random.normal((100, 224, 224, 3)),
)
        doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTensorFlowTensor, doc_2.url.load())
        doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
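        # Shapes: hidden_states (B, F, C, H, W), encoder_hidden_states (B, L, D), timestep (B,).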
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
            # num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance the ability to convert the tensor back to image bytes, which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool = False
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
|
import datetime
from typing import List
import prisma.enums
import pydantic
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[97]
)
current_page: int = pydantic.Field(
description="Current_page page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
class MyAgent(pydantic.BaseModel):
agent_id: str
agent_version: int
agent_name: str
last_edited: datetime.datetime
class MyAgentsResponse(pydantic.BaseModel):
agents: list[MyAgent]
pagination: Pagination
class StoreAgent(pydantic.BaseModel):
slug: str
agent_name: str
agent_image: str
creator: str
creator_avatar: str
sub_heading: str
description: str
runs: int
rating: float
class StoreAgentsResponse(pydantic.BaseModel):
agents: list[StoreAgent]
pagination: Pagination
class StoreAgentDetails(pydantic.BaseModel):
store_listing_version_id: str
slug: str
agent_name: str
agent_video: str
agent_image: list[str]
creator: str
creator_avatar: str
sub_heading: str
description: str
categories: list[str]
runs: int
rating: float
versions: list[str]
last_updated: datetime.datetime
class Creator(pydantic.BaseModel):
name: str
username: str
description: str
avatar_url: str
num_agents: int
agent_rating: float
agent_runs: int
is_featured: bool
class CreatorsResponse(pydantic.BaseModel):
creators: List[Creator]
pagination: Pagination
class CreatorDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
agent_rating: float
agent_runs: int
top_categories: list[str]
class Profile(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str
is_featured: bool
class StoreSubmission(pydantic.BaseModel):
agent_id: str
agent_version: int
name: str
sub_heading: str
slug: str
description: str
image_urls: list[str]
date_submitted: datetime.datetime
status: prisma.enums.SubmissionStatus
runs: int
rating: float
class StoreSubmissionsResponse(pydantic.BaseModel):
submissions: list[StoreSubmission]
pagination: Pagination
class StoreSubmissionRequest(pydantic.BaseModel):
agent_id: str
agent_version: int
slug: str
name: str
sub_heading: str
video_url: str | None = None
image_urls: list[str] = []
description: str = ""
categories: list[str] = []
class ProfileDetails(pydantic.BaseModel):
name: str
username: str
description: str
links: list[str]
avatar_url: str | None = None
class StoreReview(pydantic.BaseModel):
score: int
comments: str | None = None
class StoreReviewCreate(pydantic.BaseModel):
store_listing_version_id: str
score: int
comments: str | None = None
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
LlamaIndexGraphRetriever,
LlamaIndexRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LlamaIndexRetriever": "langchain_community.retrievers",
"LlamaIndexGraphRetriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
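# Sketch of the pattern (module path assumed): importing LlamaIndexRetriever
# from this shim module routes through __getattr__, which emits a deprecation
# warning and resolves the class from langchain_community.retrievers.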
__all__ = [
"LlamaIndexGraphRetriever",
"LlamaIndexRetriever",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.retrievers import (
LlamaIndexGraphRetriever,
LlamaIndexRetriever,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LlamaIndexRetriever": "langchain_community.retrievers",
"LlamaIndexGraphRetriever": "langchain_community.retrievers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LlamaIndexRetriever",
"LlamaIndexGraphRetriever",
]
|
from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.core.storage.chat_store.simple_chat_store import SimpleChatStore
RECOGNIZED_CHAT_STORES = {
SimpleChatStore.class_name(): SimpleChatStore,
}
def load_chat_store(data: dict) -> BaseChatStore:
"""Load a chat store from a dict."""
chat_store_name = data.get("class_name")
if chat_store_name is None:
raise ValueError("ChatStore loading requires a class_name")
if chat_store_name not in RECOGNIZED_CHAT_STORES:
raise ValueError(f"Invalid ChatStore name: {chat_store_name}")
return RECOGNIZED_CHAT_STORES[chat_store_name].from_dict(data)
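# Hedged round-trip sketch (assumes SimpleChatStore inherits a to_dict()):
#   store = SimpleChatStore()
#   same_store = load_chat_store(store.to_dict())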
|
from llama_index.core.storage.chat_store.base import BaseChatStore
from llama_index.core.storage.chat_store.simple_chat_store import SimpleChatStore
RECOGNIZED_CHAT_STORES = {
SimpleChatStore.class_name(): SimpleChatStore,
}
def load_chat_store(data: dict) -> BaseChatStore:
"""Load a chat store from a dict."""
chat_store_name = data.get("class_name", None)
if chat_store_name is None:
raise ValueError("ChatStore loading requires a class_name")
if chat_store_name not in RECOGNIZED_CHAT_STORES:
raise ValueError(f"Invalid ChatStore name: {chat_store_name}")
return RECOGNIZED_CHAT_STORES[chat_store_name].from_dict(data)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('mmengine.fileio.file_client.FileClient._prefix_to_backends',
prefix_to_backends)
def test_before_train(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = work_dir
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == (
f'test_dir/{osp.basename(work_dir)}')
def test_after_train_epoch(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = tmp_path
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
        # epoch cannot be evenly divided by 2
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
runner.work_dir = work_dir
os.system(f'touch {work_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{work_dir}/epoch_8.pth')
def test_after_train_iter(self, tmp_path):
work_dir = str(tmp_path)
runner = Mock()
runner.work_dir = str(work_dir)
runner.iter = 9
batch_idx = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
        # iter cannot be evenly divided by 2
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
# max_keep_ckpts > 0
runner.iter = 9
runner.work_dir = work_dir
os.system(f'touch {work_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert not os.path.exists(f'{work_dir}/iter_8.pth')
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from unittest.mock import Mock, patch
from mmengine.hooks import CheckpointHook
class MockPetrel:
_allow_symlink = False
def __init__(self):
pass
@property
def name(self):
return self.__class__.__name__
@property
def allow_symlink(self):
return self._allow_symlink
prefix_to_backends = {'s3': MockPetrel}
class TestCheckpointHook:
@patch('mmengine.fileio.file_client.FileClient._prefix_to_backends',
prefix_to_backends)
def test_before_train(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = work_dir
# the out_dir of the checkpoint hook is None
checkpoint_hook = CheckpointHook(interval=1, by_epoch=True)
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == runner.work_dir
# the out_dir of the checkpoint hook is not None
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir')
checkpoint_hook.before_train(runner)
assert checkpoint_hook.out_dir == (
f'test_dir/{osp.basename(work_dir)}')
# create_symlink in args and create_symlink is True
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, out_dir='test_dir', create_symlink=True)
checkpoint_hook.before_train(runner)
assert checkpoint_hook.args['create_symlink']
runner.work_dir = 's3://path/of/file'
checkpoint_hook = CheckpointHook(
interval=1, by_epoch=True, create_symlink=True)
checkpoint_hook.before_train(runner)
assert not checkpoint_hook.args['create_symlink']
def test_after_train_epoch(self, tmp_path):
runner = Mock()
work_dir = str(tmp_path)
runner.work_dir = tmp_path
runner.epoch = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
        # epoch cannot be evenly divided by 2
runner.epoch = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/epoch_10.pth')
# by epoch is False
runner.epoch = 9
runner.meta = dict()
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert runner.meta.get('hook_msgs', None) is None
# max_keep_ckpts > 0
runner.work_dir = work_dir
os.system(f'touch {work_dir}/epoch_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=True, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_epoch(runner)
assert (runner.epoch + 1) % 2 == 0
assert not os.path.exists(f'{work_dir}/epoch_8.pth')
def test_after_train_iter(self, tmp_path):
work_dir = str(tmp_path)
runner = Mock()
runner.work_dir = str(work_dir)
runner.iter = 9
batch_idx = 9
runner.meta = dict()
runner.model = Mock()
# by epoch is True
checkpoint_hook = CheckpointHook(interval=2, by_epoch=True)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert runner.meta.get('hook_msgs', None) is None
# by epoch is False
checkpoint_hook = CheckpointHook(interval=2, by_epoch=False)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert (runner.iter + 1) % 2 == 0
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
        # iter cannot be evenly divided by 2
runner.iter = 10
checkpoint_hook.after_train_epoch(runner)
assert runner.meta['hook_msgs']['last_ckpt'] == (
f'{work_dir}/iter_10.pth')
# max_keep_ckpts > 0
runner.iter = 9
runner.work_dir = work_dir
os.system(f'touch {work_dir}/iter_8.pth')
checkpoint_hook = CheckpointHook(
interval=2, by_epoch=False, max_keep_ckpts=1)
checkpoint_hook.before_train(runner)
checkpoint_hook.after_train_iter(runner, batch_idx=batch_idx)
assert not os.path.exists(f'{work_dir}/iter_8.pth')
|
import os
import sys
import pkg_resources
from setuptools import find_packages, setup
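# read_version() executes whisper/version.py in a scratch namespace so the
# version string can be read without importing the whisper package itself.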
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement = "triton>=2.0.0.dev20221202"
try:
import re
import subprocess
version_line = (
subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
)
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.7",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements
+ [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy", "black", "flake8", "isort"]},
)
|
import os
import sys
import pkg_resources
from setuptools import setup, find_packages
def read_version(fname="whisper/version.py"):
exec(compile(open(fname, encoding="utf-8").read(), fname, "exec"))
return locals()["__version__"]
requirements = []
if sys.platform.startswith("linux"):
triton_requirement = "triton>=2.0.0.dev20221202"
try:
import re
import subprocess
version_line = subprocess.check_output(["nvcc", "--version"]).strip().split(b"\n")[-1]
major, minor = re.findall(rb"([\d]+)\.([\d]+)", version_line)[0]
if (int(major), int(minor)) < (11, 4):
# the last version supporting CUDA < 11.4
triton_requirement = "triton==2.0.0.dev20221011"
except (IndexError, OSError, subprocess.SubprocessError):
pass
requirements.append(triton_requirement)
setup(
name="openai-whisper",
py_modules=["whisper"],
version=read_version(),
description="Robust Speech Recognition via Large-Scale Weak Supervision",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
readme="README.md",
python_requires=">=3.7",
author="OpenAI",
url="https://github.com/openai/whisper",
license="MIT",
packages=find_packages(exclude=["tests*"]),
install_requires=requirements + [
str(r)
for r in pkg_resources.parse_requirements(
open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
)
],
entry_points={
"console_scripts": ["whisper=whisper.transcribe:cli"],
},
include_package_data=True,
extras_require={"dev": ["pytest", "scipy"]},
)
|
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs
|
from jina import Executor, requests
from docarray import DocList
from docarray.documents import TextDoc
class MyExecutor(Executor):
@requests
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[TextDoc]:
docs[0].text = 'hello, world!'
docs[1].text = 'goodbye, world!'
return docs
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
from backend.integrations.providers import ProviderName
ExaCredentials = APIKeyCredentials
ExaCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.EXA],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="exa",
api_key=SecretStr("mock-exa-api-key"),
title="Mock Exa API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
def ExaCredentialsField() -> ExaCredentialsInput:
"""Creates an Exa credentials input on a block."""
return CredentialsField(description="The Exa integration requires an API Key.")
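# Usage sketch (hypothetical, not part of this module): a block input schema
# would typically embed the credentials field like this, assuming a
# pydantic-style `BlockSchema` base class exists elsewhere in the backend:
#
#     class ExaSearchInput(BlockSchema):
#         credentials: ExaCredentialsInput = ExaCredentialsField()
#         query: str
#
# TEST_CREDENTIALS / TEST_CREDENTIALS_INPUT above exist so such blocks can be
# exercised in tests without a real Exa API key.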
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import APIKeyCredentials, CredentialsField, CredentialsMetaInput
ExaCredentials = APIKeyCredentials
ExaCredentialsInput = CredentialsMetaInput[
Literal["exa"],
Literal["api_key"],
]
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="exa",
api_key=SecretStr("mock-exa-api-key"),
title="Mock Exa API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.title,
}
def ExaCredentialsField() -> ExaCredentialsInput:
"""Creates an Exa credentials input on a block."""
return CredentialsField(
provider="exa",
supported_credential_types={"api_key"},
description="The Exa integration requires an API Key.",
)
|
from docarray import BaseDocument
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDocument):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
from docarray import Document
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(Document):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
_NORMALIZED_TRANSCRIPT = [
"Test transcript one",
"Test transcript two",
"Test transcript three",
"In fourteen sixty-five Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
def get_mock_dataset(root_dir):
"""
root_dir: path to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LJSpeech-1.1")
archive_dir = os.path.join(base_dir, "wavs")
os.makedirs(archive_dir, exist_ok=True)
metadata_path = os.path.join(base_dir, "metadata.csv")
sample_rate = 22050
with open(metadata_path, mode="w", newline="") as metadata_file:
metadata_writer = csv.writer(metadata_file, delimiter="|", quoting=csv.QUOTE_NONE)
for i, (transcript, normalized_transcript) in enumerate(zip(_TRANSCRIPTS, _NORMALIZED_TRANSCRIPT)):
fileid = f"LJ001-{i:04d}"
metadata_writer.writerow([fileid, transcript, normalized_transcript])
filename = fileid + ".wav"
path = os.path.join(archive_dir, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, sample_rate)
mocked_data.append(normalize_wav(data))
return mocked_data, _TRANSCRIPTS, _NORMALIZED_TRANSCRIPT
class TestLJSpeech(TempDirMixin, TorchaudioTestCase):
root_dir = None
data, _transcripts, _normalized_transcript = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._transcripts, cls._normalized_transcript = get_mock_dataset(cls.root_dir)
def _test_ljspeech(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, normalized_transcript) in enumerate(dataset):
expected_transcript = self._transcripts[i]
expected_normalized_transcript = self._normalized_transcript[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
            assert sample_rate == 22050  # fixed sample rate used by get_mock_dataset
assert transcript == expected_transcript
assert normalized_transcript == expected_normalized_transcript
n_ite += 1
assert n_ite == len(self.data)
def test_ljspeech_str(self):
dataset = ljspeech.LJSPEECH(self.root_dir)
self._test_ljspeech(dataset)
def test_ljspeech_path(self):
dataset = ljspeech.LJSPEECH(Path(self.root_dir))
self._test_ljspeech(dataset)
|
import csv
import os
from pathlib import Path
from torchaudio.datasets import ljspeech
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
_NORMALIZED_TRANSCRIPT = [
"Test transcript one",
"Test transcript two",
"Test transcript three",
"In fourteen sixty-five Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,",
]
def get_mock_dataset(root_dir):
"""
root_dir: path to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LJSpeech-1.1")
archive_dir = os.path.join(base_dir, "wavs")
os.makedirs(archive_dir, exist_ok=True)
metadata_path = os.path.join(base_dir, "metadata.csv")
sample_rate = 22050
with open(metadata_path, mode="w", newline="") as metadata_file:
metadata_writer = csv.writer(metadata_file, delimiter="|", quoting=csv.QUOTE_NONE)
for i, (transcript, normalized_transcript) in enumerate(zip(_TRANSCRIPTS, _NORMALIZED_TRANSCRIPT)):
fileid = f"LJ001-{i:04d}"
metadata_writer.writerow([fileid, transcript, normalized_transcript])
filename = fileid + ".wav"
path = os.path.join(archive_dir, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, sample_rate)
mocked_data.append(normalize_wav(data))
return mocked_data, _TRANSCRIPTS, _NORMALIZED_TRANSCRIPT
class TestLJSpeech(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data, _transcripts, _normalized_transcript = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._transcripts, cls._normalized_transcript = get_mock_dataset(cls.root_dir)
def _test_ljspeech(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, normalized_transcript) in enumerate(dataset):
expected_transcript = self._transcripts[i]
expected_normalized_transcript = self._normalized_transcript[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
            assert sample_rate == 22050  # fixed sample rate used by get_mock_dataset
assert transcript == expected_transcript
assert normalized_transcript == expected_normalized_transcript
n_ite += 1
assert n_ite == len(self.data)
def test_ljspeech_str(self):
dataset = ljspeech.LJSPEECH(self.root_dir)
self._test_ljspeech(dataset)
def test_ljspeech_path(self):
dataset = ljspeech.LJSPEECH(Path(self.root_dir))
self._test_ljspeech(dataset)
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def inference_free_splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def inference_free_splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/inference-free-splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
@pytest.fixture(scope="session")
def csr_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
"""
This test checks the docstrings of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
To add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
import docarray.documents
import docarray.index
import docarray.store
import docarray.typing
from docarray.utils import filter, find, map
SUB_MODULE_TO_CHECK = [
docarray,
docarray.index,
docarray.data,
docarray.documents,
docarray.store,
docarray.typing,
find,
map,
filter,
]
def get_obj_to_check(lib):
obj_to_check = []
all_test = getattr(lib, '__all__')
try:
all_test = getattr(lib, '__all_test__')
except (AttributeError, ImportError):
pass
for obj in all_test:
obj_to_check.append(getattr(lib, obj))
return obj_to_check
obj_to_check = []
for lib in SUB_MODULE_TO_CHECK:
obj_to_check.extend(get_obj_to_check(lib))
members = []
for obj in obj_to_check:
members.extend(get_codeblock_members(obj))
@pytest.mark.parametrize("obj", members, ids=lambda d: d.__qualname__)
def test_member(obj):
check_docstring(obj)
|
"""
This test checks the docstrings of all of our public API. It does so
by checking the `__all__` of each of our namespaces.
To add a new namespace you need to
* import it
* add it to the `SUB_MODULE_TO_CHECK` list
"""
import pytest
from mktestdocs import check_docstring, get_codeblock_members
import docarray.data
import docarray.documents
import docarray.index
import docarray.store
import docarray.typing
from docarray.utils import filter, find, map
SUB_MODULE_TO_CHECK = [
docarray,
docarray.index,
docarray.data,
docarray.documents,
docarray.store,
docarray.typing,
find,
map,
filter,
]
def get_obj_to_check(lib):
obj_to_check = []
for obj in lib.__all__:
obj_to_check.append(getattr(lib, obj))
return obj_to_check
obj_to_check = []
for lib in SUB_MODULE_TO_CHECK:
obj_to_check.extend(get_obj_to_check(lib))
members = []
for obj in obj_to_check:
members.extend(get_codeblock_members(obj))
@pytest.mark.parametrize("obj", members, ids=lambda d: d.__qualname__)
def test_member(obj):
check_docstring(obj)
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form AnyTensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type AnyTensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form AnyTensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
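# Usage sketch of the parametrization machinery above, assuming a concrete
# subclass such as docarray.typing.NdArray (adjust names to your version):
#
#     from docarray import BaseDocument
#     from docarray.typing import NdArray
#
#     class ImageDoc(BaseDocument):
#         tensor: NdArray[3, 224, 224]  # __class_getitem__ builds a parametrized type
#
# Assigning an array of shape (3, 224, 224) passes validation unchanged, a
# reshapable array is reshaped, and anything else raises a ValueError inside
# __docarray_validate_shape__.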
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.computation import AbstractComputationalBackend
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __docarray_validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type.
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __docarray_validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
        {ref}`AbstractTensor.__docarray_validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _docarray_create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__docarray_validate_shape__(t, _cls._docarray_target_shape)
_ParametrizedTensor.__name__ = f'{cls.__name__}[{shape_str}]'
_ParametrizedTensor.__qualname__ = f'{cls.__qualname__}[{shape_str}]'
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__docarray_validate_getitem__(item)
return cls._docarray_create_parametrized_type(target_shape)
@classmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
comp_backend = cls.get_comp_backend()
# at runtime, 'T' is always the correct input type for .stack()
# but mypy doesn't know that, so we ignore it here
return cls.__docarray_from_native__(comp_backend.stack(seq)) # type: ignore
@classmethod
@abc.abstractmethod
def __docarray_from_native__(cls: Type[T], value: Any) -> T:
"""
Create a DocArray tensor from a tensor that is native to the given framework,
e.g. from numpy.ndarray or torch.Tensor.
"""
...
@staticmethod
@abc.abstractmethod
def get_comp_backend() -> Type[AbstractComputationalBackend]:
"""The computational backend compatible with this tensor type."""
...
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
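# Sketch of the linear scaling rule this setting enables (assuming the usual
# "LR scales with total batch size" convention):
#
#     real_batch_size = num_gpus * samples_per_gpu
#     scaled_lr = base_lr * real_batch_size / auto_scale_lr['base_batch_size']
#
# e.g. a base LR of 0.02 tuned for 16 images/iter becomes 0.01 at 8 images/iter.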
|
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = 0
# set multi-process start method as `fork` to speed up the training
mp_start_method = 'fork'
|
from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
# TODO: Test cached losses
|
from __future__ import annotations
from .CSRLoss import CSRLoss
from .CSRReconstructionLoss import CSRReconstructionLoss
from .FlopsLoss import FlopsLoss
from .SparseAnglELoss import SparseAnglELoss
from .SparseCachedGISTEmbedLoss import SparseCachedGISTEmbedLoss
from .SparseCachedMultipleNegativesRankingLoss import SparseCachedMultipleNegativesRankingLoss
from .SparseCoSENTLoss import SparseCoSENTLoss
from .SparseCosineSimilarityLoss import SparseCosineSimilarityLoss
from .SparseDistillKLDivLoss import SparseDistillKLDivLoss
from .SparseGISTEmbedLoss import SparseGISTEmbedLoss
from .SparseMarginMSELoss import SparseMarginMSELoss
from .SparseMSELoss import SparseMSELoss
from .SparseMultipleNegativesRankingLoss import SparseMultipleNegativesRankingLoss
from .SparseTripletLoss import SparseTripletLoss
from .SpladeLoss import SpladeLoss
__all__ = [
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
]
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(
f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
"""
).encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(
f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
"""
).encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
import datasets
from datasets.config import METADATA_CONFIGS_FIELD
from datasets.hub import delete_from_hub
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
|
from typing import Any, Optional
class ServiceContext:
"""
Service Context container.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
def __init__(self, **kwargs: Any) -> None:
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
@classmethod
def from_defaults(
cls,
**kwargs: Any,
) -> "ServiceContext":
"""
Create a ServiceContext from defaults.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
|
from typing import Any, Optional
class ServiceContext:
"""Service Context container.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
def __init__(self, **kwargs: Any) -> None:
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
@classmethod
def from_defaults(
cls,
**kwargs: Any,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
NOTE: Deprecated, use llama_index.settings.Settings instead or pass in
modules to local functions/methods/interfaces.
"""
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
raise ValueError(
"ServiceContext is deprecated. Use llama_index.settings.Settings instead, "
"or pass in modules to local functions/methods/interfaces.\n"
"See the docs for updated usage/migration: \n"
"https://docs.llamaindex.ai/en/stable/module_guides/supporting_modules/service_context_migration/"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from mmengine.registry import EVALUATOR, METRICS
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
self.metrics.append(metric)
@property
def dataset_meta(self) -> Optional[dict]:
"""Optional[dict]: Meta info of the dataset."""
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
"""Set the dataset meta info to the evaluator and it's metrics."""
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
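# Minimal usage sketch (hypothetical metric configs, not part of this module):
#
#     evaluator = Evaluator([dict(type='Accuracy'), dict(type='F1Score')])
#     evaluator.dataset_meta = dict(classes=('cat', 'dog'))
#     for data_batch, predictions in validation_iterations:
#         evaluator.process(data_batch, predictions)
#     results = evaluator.evaluate(size=len(val_dataset))
#
# offline_evaluate() drives the same process/evaluate cycle over dumped
# predictions, chunking both sequences via get_chunks().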
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Iterator, List, Optional, Sequence, Union
from mmengine.data import BaseDataElement
from ..registry.root import EVALUATOR, METRICS
from .metric import BaseMetric
@EVALUATOR.register_module()
class Evaluator:
"""Wrapper class to compose multiple :class:`BaseMetric` instances.
Args:
metrics (dict or BaseMetric or Sequence): The config of metrics.
"""
def __init__(self, metrics: Union[dict, BaseMetric, Sequence]):
self._dataset_meta: Optional[dict] = None
if not isinstance(metrics, Sequence):
metrics = [metrics]
self.metrics: List[BaseMetric] = []
for metric in metrics:
if isinstance(metric, dict):
self.metrics.append(METRICS.build(metric))
else:
self.metrics.append(metric)
@property
def dataset_meta(self) -> Optional[dict]:
"""Optional[dict]: Meta info of the dataset."""
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
"""Set the dataset meta info to the evaluator and it's metrics."""
self._dataset_meta = dataset_meta
for metric in self.metrics:
metric.dataset_meta = dataset_meta
def process(self, data_batch: Sequence[dict],
predictions: Sequence[BaseDataElement]):
"""Convert ``BaseDataSample`` to dict and invoke process method of each
metric.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[BaseDataElement]): A batch of outputs from
the model.
"""
_data_batch = []
for data in data_batch:
if isinstance(data['data_sample'], BaseDataElement):
_data_batch.append(
dict(
inputs=data['inputs'],
data_sample=data['data_sample'].to_dict()))
else:
_data_batch.append(data)
_predictions = []
for pred in predictions:
if isinstance(pred, BaseDataElement):
_predictions.append(pred.to_dict())
else:
_predictions.append(pred)
for metric in self.metrics:
metric.process(_data_batch, _predictions)
def evaluate(self, size: int) -> dict:
"""Invoke ``evaluate`` method of each metric and collect the metrics
dictionary.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation results of all metrics. The keys are the names
of the metrics, and the values are corresponding results.
"""
metrics = {}
for metric in self.metrics:
_results = metric.evaluate(size)
# Check metric name conflicts
for name in _results.keys():
if name in metrics:
raise ValueError(
'There are multiple evaluation results with the same '
f'metric name {name}. Please make sure all metrics '
'have different prefixes.')
metrics.update(_results)
return metrics
def offline_evaluate(self,
data: Sequence,
predictions: Sequence,
chunk_size: int = 1):
"""Offline evaluate the dumped predictions on the given data .
Args:
data (Sequence): All data of the validation set.
predictions (Sequence): All predictions of the model on the
validation set.
chunk_size (int): The number of data samples and predictions to be
processed in a batch.
"""
# support chunking iterable objects
def get_chunks(seq: Iterator, chunk_size=1):
stop = False
while not stop:
chunk = []
for _ in range(chunk_size):
try:
chunk.append(next(seq))
except StopIteration:
stop = True
break
if chunk:
yield chunk
size = 0
for data_chunk, pred_chunk in zip(
get_chunks(iter(data), chunk_size),
get_chunks(iter(predictions), chunk_size)):
size += len(data_chunk)
self.process(data_chunk, pred_chunk)
return self.evaluate(size)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
Annoy package dependency is only required at the query time.
"""
def __init__(
self,
default_top_k: int = 10,
metric: str = 'euclidean',
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ['r'],
is_distance: bool = False,
**kwargs,
):
"""
        Initialize an AnnoySearcher
        :param default_top_k: get top k vectors
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
        :param is_distance: Boolean flag that describes if the distance metric needs to be reinterpreted as similarities.
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.default_top_k = default_top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.is_distance = is_distance
self.logger = get_logger(self)
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.metas.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._doc_id_to_offset = {}
self._load_index(self._ids, self._vecs)
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, self.default_top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
if self.is_distance:
if self.metric == 'dot':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = dist
else:
if self.metric == 'dot':
match.scores[self.metric] = dist
elif self.metric == 'angular' or self.metric == 'hamming':
match.scores[self.metric] = 1 - dist
else:
match.scores[self.metric] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc.embedding = np.array(
self._indexer.get_item_vector(int(self._doc_id_to_offset[str(doc.id)]))
)
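# Standalone restatement of the score conversion used in search() above, kept
# here for reference; `dist` is a raw value returned by Annoy for `metric`.
def _annoy_dist_to_score(metric: str, dist: float, is_distance: bool) -> float:
    if is_distance:
        # report raw distances; 'dot' comes back as a similarity, so flip it
        return 1 - dist if metric == 'dot' else dist
    if metric == 'dot':
        return dist
    if metric in ('angular', 'hamming'):
        return 1 - dist
    # euclidean / manhattan: map distance to a similarity in (0, 1]
    return 1 / (1 + dist)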
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, List, Union, Dict
import numpy as np
from annoy import AnnoyIndex
from jina import Executor, requests, DocumentArray, Document
from jina_commons import get_logger
from jina_commons.indexers.dump import import_vectors
class AnnoySearcher(Executor):
"""Annoy powered vector indexer
For more information about the Annoy supported parameters, please consult:
- https://github.com/spotify/annoy
.. note::
Annoy package dependency is only required at the query time.
"""
def __init__(
self,
top_k: int = 10,
metric: str = 'euclidean',
num_trees: int = 10,
dump_path: Optional[str] = None,
default_traversal_paths: List[str] = ['r'],
**kwargs,
):
"""
        Initialize an AnnoySearcher
        :param top_k: get top k vectors
        :param metric: Metric can be "angular", "euclidean", "manhattan", "hamming", or "dot"
        :param num_trees: builds a forest of n_trees trees. More trees give higher precision when querying.
        :param dump_path: the path to load ids and vecs
        :param default_traversal_paths: traversal paths on docs, e.g. ['r'], ['c']
:param args:
:param kwargs:
"""
super().__init__(**kwargs)
self.top_k = top_k
self.metric = metric
self.num_trees = num_trees
self.default_traversal_paths = default_traversal_paths
self.logger = get_logger(self)
dump_path = dump_path or kwargs.get('runtime_args', {}).get('dump_path', None)
if dump_path is not None:
self.logger.info('Start building "AnnoyIndexer" from dump data')
ids, vecs = import_vectors(dump_path, str(self.metas.pea_id))
self._ids = np.array(list(ids))
self._vecs = np.array(list(vecs))
num_dim = self._vecs.shape[1]
self._indexer = AnnoyIndex(num_dim, self.metric)
self._doc_id_to_offset = {}
self._load_index(self._ids, self._vecs)
else:
self.logger.warning(
'No data loaded in "AnnoyIndexer". Use .rolling_update() to re-initialize it...'
)
def _load_index(self, ids, vecs):
for idx, v in enumerate(vecs):
self._indexer.add_item(idx, v.astype(np.float32))
self._doc_id_to_offset[ids[idx]] = idx
self._indexer.build(self.num_trees)
@requests(on='/search')
def search(self, docs: DocumentArray, parameters: Dict, **kwargs):
if not hasattr(self, '_indexer'):
self.logger.warning('Querying against an empty index')
return
traversal_paths = parameters.get(
'traversal_paths', self.default_traversal_paths
)
for doc in docs.traverse_flat(traversal_paths):
indices, dists = self._indexer.get_nns_by_vector(
doc.embedding, self.top_k, include_distances=True
)
for idx, dist in zip(indices, dists):
match = Document(id=self._ids[idx], embedding=self._vecs[idx])
match.scores['distance'] = 1 / (1 + dist)
doc.matches.append(match)
@requests(on='/fill_embedding')
def fill_embedding(self, query_da: DocumentArray, **kwargs):
for doc in query_da:
doc.embedding = np.array(
self._indexer.get_item_vector(int(self._doc_id_to_offset[str(doc.id)]))
)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
    CSR Loss module that combines the CSRReconstruction Loss and the Sparse Multiple Negatives Ranking Loss (MRL / InfoNCE).
Based on the paper:
Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://arxiv.org/abs/2503.01776
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(self, model: SparseEncoder, beta: float = 0.1, gamma: float = 1.0, scale: float = 20.0):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = CSRReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
    ) -> torch.Tensor:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
            The combined loss (L_recon + γ * L_MRL) as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta, "gamma": self.gamma, "scale": self.scale}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.ReconstructionLoss import ReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines Reconstruction Loss and Sparse Multiple Negatives Ranking Loss.
Based on the paper:
Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://arxiv.org/abs/2503.01776
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(
self,
model: SparseEncoder,
beta: float = 0.1,
gamma: float = 1.0,
scale: float = 20.0,
):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = ReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self,
sentence_features: Iterable[dict[str, torch.Tensor]],
labels: torch.Tensor = None,
    ) -> torch.Tensor:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
            The combined loss (L_recon + γ * L_MRL) as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
"gamma": self.gamma,
"scale": self.scale,
}
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
bgr_to_rgb=False),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
                ``util.cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
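# Minimal numeric sketch of the CoSENT objective from the docstring above,
# independent of this class (assumption: `s` holds pairwise similarity scores
# and `y` the expected similarities for the same pairs):
def _cosent_sketch(s: Tensor, y: Tensor, scale: float = 20.0) -> Tensor:
    import torch

    diff = scale * (s[None, :] - s[:, None])  # diff[i, j] = scale * (s(k,l) - s(i,j))
    mask = y[:, None] > y[None, :]  # pairs where expected sim(i,j) > sim(k,l)
    return torch.log(1 + torch.exp(diff[mask]).sum())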
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengine_maximum_version = '0.2.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
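# Sketch of how digit_version orders versions (pre-releases sort below the
# final release, so the range above admits e.g. 2.0.0rc0 but excludes 2.1.0):
#
#     assert digit_version('2.0.0rc0') < digit_version('2.0.0') < digit_version('2.1.0')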
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.0.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.0.0'
mmengine_maximum_version = '0.2.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version <= digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<={mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
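        # Cityscapes stores instances in *_gtFine_instanceIds.png as
        # `class_id * 1000 + instance_index`, so 24 * 1000 below marks
        # instance 0 of class 24 ('person' in the Cityscapes label map).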
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertFalse(osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
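        # Cityscapes stores instances in *_gtFine_instanceIds.png as
        # `class_id * 1000 + instance_index`, so 24 * 1000 below marks
        # instance 0 of class 24 ('person' in the Cityscapes label map).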
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
        self.assertFalse(osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from __future__ import annotations
from typing import Any, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class CoSENTLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence) loss.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`CosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, CoSENTLoss may be used
as a drop-in replacement for :class:`CosineSimilarityLoss` in any training script.
Args:
model: SentenceTransformerModel
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Relations:
- :class:`AnglELoss` is CoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`CosineSimilarityLoss` seems to produce a weaker training signal than CoSENTLoss. In our experiments, CoSENTLoss is recommended.
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
})
loss = losses.CoSENTLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(CoSENTLoss, self).__init__()
self.model = model
self.similarity_fct = similarity_fct
self.scale = scale
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
scores = self.similarity_fct(embeddings[0], embeddings[1])
scores = scores * self.scale
scores = scores[:, None] - scores[None, :]
# label matrix indicating which pairs are relevant
labels = labels[:, None] < labels[None, :]
labels = labels.float()
# mask out irrelevant pairs so they are negligible after exp()
scores = scores - (1 - labels) * 1e12
# append a zero as e^0 = 1
scores = torch.cat((torch.zeros(1).to(scores.device), scores.view(-1)), dim=0)
loss = torch.logsumexp(scores, dim=0)
return loss
def get_config_dict(self) -> dict[str, Any]:
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
@property
def citation(self) -> str:
return """
@online{kexuefm-8847,
title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},
author={Su Jianlin},
year={2022},
month={Jan},
url={https://kexue.fm/archives/8847},
}
"""
|
from qdrant_client.http.models.models import Distance
DISTANCES = {
'cosine': Distance.COSINE,
'euclidean': Distance.EUCLID,
'dot': Distance.DOT,
}
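# A brief usage sketch under stated assumptions (an in-memory client and the
# illustrative collection name and vector size below); the string keys let
# callers pick a metric without importing qdrant types directly.
if __name__ == "__main__":
    from qdrant_client import QdrantClient
    from qdrant_client.http.models import VectorParams

    client = QdrantClient(":memory:")
    client.recreate_collection(
        collection_name="demo",
        vectors_config=VectorParams(size=4, distance=DISTANCES["cosine"]),
    )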
|
from qdrant_openapi_client.models.models import Distance
DISTANCES = {
'cosine': Distance.COSINE,
'euclidean': Distance.EUCLID,
'dot': Distance.DOT,
}
|
"""Cloudflare embeddings file."""
from typing import Any, List, Optional
import requests
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
MAX_BATCH_SIZE = 100 # As per Cloudflare's maxItems limit for batch processing
API_URL_TEMPLATE = "https://api.cloudflare.com/client/v4/accounts/{}/ai/run/{}"
class CloudflareEmbedding(BaseEmbedding):
"""
Cloudflare Workers AI class for generating text embeddings.
This class allows for the generation of text embeddings using Cloudflare Workers AI with the BAAI general embedding models.
Args:
account_id (str): The Cloudflare Account ID.
auth_token (str, Optional): The Cloudflare Auth Token. Alternatively, set up environment variable `CLOUDFLARE_AUTH_TOKEN`.
model (str): The model ID for the embedding service. Cloudflare provides different models for embeddings, check https://developers.cloudflare.com/workers-ai/models/#text-embeddings. Defaults to "@cf/baai/bge-base-en-v1.5".
embed_batch_size (int): The batch size for embedding generation. Cloudflare's current limit is 100 at max. Defaults to llama_index's default.
Note:
Ensure you have a valid Cloudflare account and have access to the necessary AI services and models. The account ID and authorization token are sensitive details; secure them appropriately.
"""
account_id: str = Field(default=None, description="The Cloudflare Account ID.")
auth_token: str = Field(default=None, description="The Cloudflare Auth Token.")
model: str = Field(
default="@cf/baai/bge-base-en-v1.5",
description="The model to use when calling Cloudflare AI API",
)
_session: Any = PrivateAttr()
def __init__(
self,
account_id: str,
auth_token: Optional[str] = None,
model: str = "@cf/baai/bge-base-en-v1.5",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
super().__init__(
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model=model,
**kwargs,
)
self.account_id = account_id
self.auth_token = get_from_param_or_env(
"auth_token", auth_token, "CLOUDFLARE_AUTH_TOKEN", ""
)
self.model = model
self._session = requests.Session()
self._session.headers.update({"Authorization": f"Bearer {self.auth_token}"})
@classmethod
def class_name(cls) -> str:
return "CloudflareEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._get_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self._aget_text_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._get_text_embeddings([text])[0]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
result = await self._aget_text_embeddings([text])
return result[0]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
response = self._session.post(
API_URL_TEMPLATE.format(self.account_id, self.model), json={"text": texts}
).json()
if "result" not in response:
print(response)
raise RuntimeError("Failed to fetch embeddings")
return response["result"]["data"]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
import aiohttp
async with aiohttp.ClientSession(trust_env=True) as session:
headers = {
"Authorization": f"Bearer {self.auth_token}",
"Accept-Encoding": "identity",
}
async with session.post(
API_URL_TEMPLATE.format(self.account_id, self.model),
json={"text": texts},
headers=headers,
) as response:
resp = await response.json()
if "result" not in resp:
raise RuntimeError("Failed to fetch embeddings asynchronously")
return resp["result"]["data"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing import assert_allclose
@MODELS.register_module()
class CustomDataPreprocessor(BaseDataPreprocessor):
def forward(self, data, training=False):
if training:
return 1
else:
return 2
class ToyModel(BaseModel):
def __init__(self, data_preprocessor=None):
super().__init__(data_preprocessor=data_preprocessor, init_cfg=None)
self.conv = nn.Conv2d(3, 1, 1)
def forward(self, batch_inputs, data_samples=None, mode='tensor'):
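        # BaseModel's mode contract (summarized here as an aid): 'loss'
        # returns a dict of losses for training, 'predict' returns
        # post-processed predictions for evaluation, and 'tensor' returns
        # raw network outputs.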
if mode == 'loss':
out = self.conv(batch_inputs)
return dict(loss=out)
elif mode == 'predict':
out = self.conv(batch_inputs)
return out
elif mode == 'tensor':
out = self.conv(batch_inputs)
return out
class TestBaseModel(TestCase):
def test_init(self):
        # instantiate model without `data_preprocessor`
model = ToyModel()
self.assertIsInstance(model.data_preprocessor, BaseDataPreprocessor)
data_preprocessor = dict(type='CustomDataPreprocessor')
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIsInstance(model.data_preprocessor, CustomDataPreprocessor)
self.assertEqual(model.data_preprocessor(1, training=True), 1)
self.assertEqual(model.data_preprocessor(1, training=False), 2)
        # instantiate model with a built `data_preprocessor`.
data_preprocessor = CustomDataPreprocessor()
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIs(model.data_preprocessor, data_preprocessor)
        # instantiate model with a wrongly typed `data_preprocessor`.
with self.assertRaisesRegex(TypeError, 'data_preprocessor should be'):
ToyModel(data_preprocessor=[data_preprocessor])
def test_parse_losses(self):
model = ToyModel()
loss_cls = torch.tensor(1, dtype=torch.float32)
loss_list = [
torch.tensor(2, dtype=torch.float32),
torch.tensor(3, dtype=torch.float32)
]
losses = dict(loss_cls=loss_cls, loss_list=loss_list)
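        # parse_losses sums every loss tensor into one scalar (1 + 2 + 3 = 6)
        # and logs each component, reducing list-valued entries by summation
        # (2 + 3 = 5 for `loss_list`).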
target_parsed_losses = torch.tensor(6, dtype=torch.float32)
        target_log_vars = dict(
loss=torch.tensor(6, dtype=torch.float32),
loss_cls=torch.tensor(1, dtype=torch.float32),
loss_list=torch.tensor(5, dtype=torch.float32))
parse_losses, log_vars = model.parse_losses(losses)
assert_allclose(parse_losses, target_parsed_losses)
for key in log_vars:
            self.assertIn(key, target_log_vars)
            assert_allclose(log_vars[key], target_log_vars[key])
with self.assertRaises(TypeError):
losses['error_key'] = dict()
model.parse_losses(losses)
def test_train_step(self):
model = ToyModel()
optimizer = SGD(model.parameters(), lr=0.1)
optim_wrapper = OptimWrapper(optimizer)
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
# initiate grad.
# model.conv.weight.grad = torch.randn(1, 3, 1, 1)
log_vars = model.train_step([data], optim_wrapper)
self.assertIsNotNone(model.conv.weight.grad)
self.assertIsInstance(log_vars['loss'], torch.Tensor)
def test_val_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
out = model.val_step([data])
self.assertIsInstance(out, torch.Tensor)
def test_test_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
        out = model.test_step([data])
self.assertIsInstance(out, torch.Tensor)
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_cuda(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().cuda()
model.val_step([data])
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_to(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().to(torch.cuda.current_device())
model.val_step([data])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
import torch.nn as nn
from torch.optim import SGD
from mmengine.model import BaseDataPreprocessor, BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODELS
from mmengine.testing import assert_allclose
@MODELS.register_module()
class CustomDataPreprocessor(BaseDataPreprocessor):
def forward(self, data, training=False):
if training:
return 1
else:
return 2
class ToyModel(BaseModel):
def __init__(self, data_preprocessor=None):
super().__init__(None, data_preprocessor=data_preprocessor)
self.conv = nn.Conv2d(3, 1, 1)
def forward(self, batch_inputs, data_samples=None, mode='tensor'):
if mode == 'loss':
out = self.conv(batch_inputs)
return dict(loss=out)
elif mode == 'predict':
out = self.conv(batch_inputs)
return out
elif mode == 'tensor':
out = self.conv(batch_inputs)
return out
class TestBaseModel(TestCase):
def test_init(self):
        # instantiate model without `data_preprocessor`
model = ToyModel()
self.assertIsInstance(model.data_preprocessor, BaseDataPreprocessor)
data_preprocessor = dict(type='CustomDataPreprocessor')
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIsInstance(model.data_preprocessor, CustomDataPreprocessor)
self.assertEqual(model.data_preprocessor(1, training=True), 1)
self.assertEqual(model.data_preprocessor(1, training=False), 2)
        # instantiate model with a built `data_preprocessor`.
data_preprocessor = CustomDataPreprocessor()
model = ToyModel(data_preprocessor=data_preprocessor)
self.assertIs(model.data_preprocessor, data_preprocessor)
        # instantiate model with a wrongly typed `data_preprocessor`.
with self.assertRaisesRegex(TypeError, 'data_preprocessor should be'):
ToyModel(data_preprocessor=[data_preprocessor])
def test_parse_losses(self):
model = ToyModel()
loss_cls = torch.tensor(1, dtype=torch.float32)
loss_list = [
torch.tensor(2, dtype=torch.float32),
torch.tensor(3, dtype=torch.float32)
]
losses = dict(loss_cls=loss_cls, loss_list=loss_list)
target_parsed_losses = torch.tensor(6, dtype=torch.float32)
        target_log_vars = dict(
loss=torch.tensor(6, dtype=torch.float32),
loss_cls=torch.tensor(1, dtype=torch.float32),
loss_list=torch.tensor(5, dtype=torch.float32))
parse_losses, log_vars = model.parse_losses(losses)
assert_allclose(parse_losses, target_parsed_losses)
for key in log_vars:
            self.assertIn(key, target_log_vars)
            assert_allclose(log_vars[key], target_log_vars[key])
with self.assertRaises(TypeError):
losses['error_key'] = dict()
model.parse_losses(losses)
def test_train_step(self):
model = ToyModel()
optimizer = SGD(model.parameters(), lr=0.1)
optim_wrapper = OptimWrapper(optimizer)
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
# initiate grad.
# model.conv.weight.grad = torch.randn(1, 3, 1, 1)
log_vars = model.train_step([data], optim_wrapper)
self.assertIsNotNone(model.conv.weight.grad)
self.assertIsInstance(log_vars['loss'], torch.Tensor)
def test_val_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
out = model.val_step([data])
self.assertIsInstance(out, torch.Tensor)
def test_test_step(self):
inputs = torch.randn(3, 1, 1)
data = dict(inputs=inputs)
model = ToyModel()
        out = model.test_step([data])
self.assertIsInstance(out, torch.Tensor)
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_cuda(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().cuda()
model.val_step([data])
@unittest.skipIf(not torch.cuda.is_available(), 'cuda should be available')
def test_to(self):
inputs = torch.randn(3, 1, 1).cuda()
data = dict(inputs=inputs)
model = ToyModel().to(torch.cuda.current_device())
model.val_step([data])
|
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
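# A hypothetical usage sketch; the dataset folder is an assumption, and the
# reader expects s1.<split>.gz, s2.<split>.gz and labels.<split>.gz inside it.
if __name__ == "__main__":
    reader = NLIDataReader("datasets/AllNLI")
    train_examples = reader.get_examples("train.gz", max_examples=1000)
    print(train_examples[0].texts, train_examples[0].label)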
|
import gzip
import os
from . import InputExample
class NLIDataReader(object):
"""
Reads in the Stanford NLI dataset and the MultiGenre NLI dataset
"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
        filename specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, 's1.' + filename),
mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, 's2.' + filename),
mode="rt", encoding="utf-8").readlines()
labels = gzip.open(os.path.join(self.dataset_folder, 'labels.' + filename),
mode="rt", encoding="utf-8").readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|