input (string, 33-5k chars) | output (string, 32-5k chars) |
---|---|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from pathlib import Path
from unittest.mock import patch
def test_class():
names_of_base_classes = [b.__name__ for b in FastEmbedEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
@patch("llama_index.embeddings.fastembed.base.TextEmbedding")
def test_create_fastembed_embedding(mock_text_embedding):
cache = Path("./test_cache_2")
fastembed_embedding = FastEmbedEmbedding(
cache_dir=str(cache),
embed_batch_size=24,
doc_embed_type="passage",
)
assert fastembed_embedding.cache_dir == str(cache)
assert fastembed_embedding.embed_batch_size == 24
assert fastembed_embedding.doc_embed_type == "passage"
assert mock_text_embedding.call_args.kwargs["cache_dir"] == str(cache)
assert mock_text_embedding.call_args.kwargs["threads"] is None
assert mock_text_embedding.call_args.kwargs["providers"] is None
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.fastembed import FastEmbedEmbedding
def test_class():
names_of_base_classes = [b.__name__ for b in FastEmbedEmbedding.__mro__]
assert BaseEmbedding.__name__ in names_of_base_classes
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.17.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=1))
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 20],
gamma=0.1)
]
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
default_hooks = dict(logger=dict(interval=1))
log_processor = dict(window_size=1)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
'../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=1))
# optimizer
optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[16, 20])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
log_config = dict(interval=1)
# TODO add auto-scale-lr after a series of experiments
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8429 Spearman: 0.8366
Model Sparsity: Active Dimensions: 78.3, Sparsity Ratio: 0.9974
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8366
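# Hedged aside (assuming the Sentence Transformers encode()/similarity() API is
# available on SparseEncoder): with similarity_fn_name set to "cosine",
# model.similarity() scores embedding pairs with cosine similarity rather than
# the dot product the model was trained with.
#
#   embeddings = model.encode(["A cat sits on the mat.", "A dog plays outside."])
#   print(model.similarity(embeddings, embeddings))  # 2x2 similarity matrix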
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This Python code automates the download and creation of the parallel sentence files.
"""
import os
from opustools import OpusRead
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"] # Source language, our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"] # Target languages, out student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpus / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, "{}-{}-{}.tsv.gz".format(corpus, src_lang, trg_lang))
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This Python code automates the download and creation of the parallel sentence files.
"""
from opustools import OpusRead
import os
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"] # Source language, our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"] # Target languages, out student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpus / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, "{}-{}-{}.tsv.gz".format(corpus, src_lang, trg_lang))
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, List, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
require_gpu: bool = False,
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param require_gpu: if set to True, spaCy is required to run on the GPU via
``spacy.require_gpu()``
:param download_data: if set to True, the spaCy pipeline data is downloaded
when the executor is initialized
:param default_batch_size: Default batch size, used if ``batch_size`` is not
provided as a parameter in the request
:param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
are not provided as a parameter in the request.
"""
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if require_gpu:
spacy.require_gpu()
if download_data:
subprocess.run(
['python', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
doc.embedding = spacy_doc.vector
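# Hedged usage sketch (assuming the Jina Flow API of the same era as
# jina_commons): the executor is normally driven through a Flow, and the
# traversal_paths / batch_size documented above are passed as request parameters.
#
#   from jina import Flow, Document
#   flow = Flow().add(uses=SpacyTextEncoder)
#   with flow:
#       flow.post(
#           on='/encode',
#           inputs=DocumentArray([Document(text='hello world')]),
#           parameters={'traversal_paths': ['r'], 'batch_size': 10},
#       )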
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, List, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` using models offered by spaCy
:param model_name: pre-trained spaCy language pipeline name
:param require_gpu: if set to True, spaCy is required to run on the GPU via
``spacy.require_gpu()``
:param download_data: if set to True, the spaCy pipeline data is downloaded
when the executor is initialized
:param default_batch_size: Default batch size, used if ``batch_size`` is not
provided as a parameter in the request
:param default_traversal_paths: Default traversal paths, used if ``traversal_paths``
are not provided as a parameter in the request.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
require_gpu: bool = False,
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: List[str] = ['r'],
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if require_gpu:
spacy.require_gpu()
if download_data:
subprocess.run(
['python', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
doc.embedding = spacy_doc.vector
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RescalingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_rescaling_basics(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_dtypes(self):
# int scale
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 2, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int offset
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0, "offset": 2},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int inputs
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
input_dtype="int16",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_rescaling_correctness(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
out = layer(x)
self.assertAllClose(out, x / 255 + 0.5)
def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
next(iter(ds)).numpy()
def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
backend.set_image_data_format("channels_first")
layer = layers.Rescaling(
scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
)
x = np.random.random((2, 3, 10, 10)) * 255
layer(x)
backend.set_image_data_format(config)
@pytest.mark.requires_trainable_backend
def test_numpy_args(self):
# https://github.com/keras-team/keras/issues/20072
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": np.array(1.0 / 255.0),
"offset": np.array(0.5),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RescalingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_rescaling_basics(self):
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_rescaling_dtypes(self):
# int scale
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 2, "offset": 0.5},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int offset
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0, "offset": 2},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# int inputs
self.run_layer_test(
layers.Rescaling,
init_kwargs={"scale": 1.0 / 255, "offset": 0.5},
input_shape=(2, 3),
input_dtype="int16",
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
def test_rescaling_correctness(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
out = layer(x)
self.assertAllClose(out, x / 255 + 0.5)
def test_tf_data_compatibility(self):
layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
x = np.random.random((3, 10, 10, 3)) * 255
ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
for output in ds.take(1):
output.numpy()
def test_rescaling_with_channels_first_and_vector_scale(self):
config = backend.image_data_format()
backend.set_image_data_format("channels_first")
layer = layers.Rescaling(
scale=[1.0 / 255, 1.5 / 255, 2.0 / 255], offset=0.5
)
x = np.random.random((2, 3, 10, 10)) * 255
layer(x)
backend.set_image_data_format(config)
@pytest.mark.requires_trainable_backend
def test_numpy_args(self):
# https://github.com/keras-team/keras/issues/20072
self.run_layer_test(
layers.Rescaling,
init_kwargs={
"scale": np.array(1.0 / 255.0),
"offset": np.array(0.5),
},
input_shape=(2, 3),
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
return AgentFinish({"output": text}, text)
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
if base_parser is not None:
return cls(base_parser=base_parser)
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
from __future__ import annotations
import json
import logging
import re
from re import Pattern
from typing import Optional, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from pydantic import Field
from langchain.agents.agent import AgentOutputParser
from langchain.agents.structured_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.output_parsers import OutputFixingParser
logger = logging.getLogger(__name__)
class StructuredChatOutputParser(AgentOutputParser):
"""Output parser for the structured chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"```(?:json\s+)?(\W.*?)```", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
action_match = self.pattern.search(text)
if action_match is not None:
response = json.loads(action_match.group(1).strip(), strict=False)
if isinstance(response, list):
# gpt turbo frequently ignores the directive to emit a single action
logger.warning("Got multiple action responses: %s", response)
response = response[0]
if response["action"] == "Final Answer":
return AgentFinish({"output": response["action_input"]}, text)
else:
return AgentAction(
response["action"], response.get("action_input", {}), text
)
else:
return AgentFinish({"output": text}, text)
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "structured_chat"
class StructuredChatOutputParserWithRetries(AgentOutputParser):
"""Output parser with retries for the structured chat agent."""
base_parser: AgentOutputParser = Field(default_factory=StructuredChatOutputParser)
"""The base parser to use."""
output_fixing_parser: Optional[OutputFixingParser] = None
"""The output fixing parser to use."""
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
try:
if self.output_fixing_parser is not None:
parsed_obj: Union[AgentAction, AgentFinish] = (
self.output_fixing_parser.parse(text)
)
else:
parsed_obj = self.base_parser.parse(text)
return parsed_obj
except Exception as e:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
@classmethod
def from_llm(
cls,
llm: Optional[BaseLanguageModel] = None,
base_parser: Optional[StructuredChatOutputParser] = None,
) -> StructuredChatOutputParserWithRetries:
if llm is not None:
base_parser = base_parser or StructuredChatOutputParser()
output_fixing_parser: OutputFixingParser = OutputFixingParser.from_llm(
llm=llm, parser=base_parser
)
return cls(output_fixing_parser=output_fixing_parser)
elif base_parser is not None:
return cls(base_parser=base_parser)
else:
return cls()
@property
def _type(self) -> str:
return "structured_chat_with_retries"
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to Elasticsearch together with their respective sentence
embeddings.
The script shows results from BM25 as well as from semantic search with
cosine similarity.
You need Elasticsearch up and running, for example using Docker
(https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
"""
import csv
import os
import time
from ssl import create_default_context
import tqdm.autonotebook
from elasticsearch import Elasticsearch, helpers
from sentence_transformers import SentenceTransformer, util
es = Elasticsearch(
hosts=["https://localhost:9200"],
basic_auth=("elastic", os.environ["ELASTIC_PASSWORD"]), # displayed at ES server startup
ssl_context=create_default_context(cafile="http_ca.crt"), # copied from inside ES container
)
model = SentenceTransformer("quora-distilbert-multilingual")
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
all_questions = {}
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
all_questions[row["qid1"]] = row["question1"]
if len(all_questions) >= max_corpus_size:
break
all_questions[row["qid2"]] = row["question2"]
if len(all_questions) >= max_corpus_size:
break
qids = list(all_questions.keys())
questions = [all_questions[qid] for qid in qids]
# Index data, if the index does not exist yet
if not es.indices.exists(index="quora"):
try:
es_index = {
"mappings": {
"properties": {
"question": {"type": "text"},
"question_vector": {"type": "dense_vector", "dims": 768, "index": True, "similarity": "cosine"},
}
}
}
es.indices.create(index="quora", body=es_index)
chunk_size = 500
print("Index data (you can stop it by pressing Ctrl+C once):")
with tqdm.tqdm(total=len(qids)) as pbar:
for start_idx in range(0, len(qids), chunk_size):
end_idx = start_idx + chunk_size
embeddings = model.encode(questions[start_idx:end_idx], show_progress_bar=False)
bulk_data = []
for qid, question, embedding in zip(qids[start_idx:end_idx], questions[start_idx:end_idx], embeddings):
bulk_data.append(
{
"_index": "quora",
"_id": qid,
"_source": {"question": question, "question_vector": embedding},
}
)
helpers.bulk(es, bulk_data)
pbar.update(chunk_size)
except Exception:
print("During index an exception occurred. Continue\n\n")
# Interactive search queries
while True:
inp_question = input("Please enter a question: ")
encode_start_time = time.time()
question_embedding = model.encode(inp_question)
encode_end_time = time.time()
# Lexical search
bm25 = es.search(index="quora", body={"query": {"match": {"question": inp_question}}})
# Semantic search
sem_search = es.search(
index="quora",
knn={"field": "question_vector", "query_vector": question_embedding, "k": 10, "num_candidates": 100},
)
print("Input question:", inp_question)
print(
"Computing the embedding took {:.3f} seconds, BM25 search took {:.3f} seconds, semantic search with ES took {:.3f} seconds".format(
encode_end_time - encode_start_time, bm25["took"] / 1000, sem_search["took"] / 1000
)
)
print("BM25 results:")
for hit in bm25["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\nSemantic Search results:")
for hit in sem_search["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\n\n========\n")
|
"""
This script contains an example of how to perform semantic search with Elasticsearch.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Questions are indexed to Elasticsearch together with their respective sentence
embeddings.
The script shows results from BM25 as well as from semantic search with
cosine similarity.
You need Elasticsearch up and running, for example using Docker
(https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html).
Further, you need the Python Elasticsearch Client installed: https://elasticsearch-py.readthedocs.io/
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned across 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
"""
from sentence_transformers import SentenceTransformer, util
import os
from elasticsearch import Elasticsearch, helpers
from ssl import create_default_context
import csv
import time
import tqdm.autonotebook
es = Elasticsearch(
hosts=["https://localhost:9200"],
basic_auth=("elastic", os.environ["ELASTIC_PASSWORD"]), # displayed at ES server startup
ssl_context=create_default_context(cafile="http_ca.crt"), # copied from inside ES container
)
model = SentenceTransformer("quora-distilbert-multilingual")
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
all_questions = {}
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
all_questions[row["qid1"]] = row["question1"]
if len(all_questions) >= max_corpus_size:
break
all_questions[row["qid2"]] = row["question2"]
if len(all_questions) >= max_corpus_size:
break
qids = list(all_questions.keys())
questions = [all_questions[qid] for qid in qids]
# Index data, if the index does not exist yet
if not es.indices.exists(index="quora"):
try:
es_index = {
"mappings": {
"properties": {
"question": {"type": "text"},
"question_vector": {"type": "dense_vector", "dims": 768, "index": True, "similarity": "cosine"},
}
}
}
es.indices.create(index="quora", body=es_index)
chunk_size = 500
print("Index data (you can stop it by pressing Ctrl+C once):")
with tqdm.tqdm(total=len(qids)) as pbar:
for start_idx in range(0, len(qids), chunk_size):
end_idx = start_idx + chunk_size
embeddings = model.encode(questions[start_idx:end_idx], show_progress_bar=False)
bulk_data = []
for qid, question, embedding in zip(qids[start_idx:end_idx], questions[start_idx:end_idx], embeddings):
bulk_data.append(
{
"_index": "quora",
"_id": qid,
"_source": {"question": question, "question_vector": embedding},
}
)
helpers.bulk(es, bulk_data)
pbar.update(chunk_size)
except Exception:
print("During index an exception occurred. Continue\n\n")
# Interactive search queries
while True:
inp_question = input("Please enter a question: ")
encode_start_time = time.time()
question_embedding = model.encode(inp_question)
encode_end_time = time.time()
# Lexical search
bm25 = es.search(index="quora", body={"query": {"match": {"question": inp_question}}})
# Semantic search
sem_search = es.search(
index="quora",
knn={"field": "question_vector", "query_vector": question_embedding, "k": 10, "num_candidates": 100},
)
print("Input question:", inp_question)
print(
"Computing the embedding took {:.3f} seconds, BM25 search took {:.3f} seconds, semantic search with ES took {:.3f} seconds".format(
encode_end_time - encode_start_time, bm25["took"] / 1000, sem_search["took"] / 1000
)
)
print("BM25 results:")
for hit in bm25["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\nSemantic Search results:")
for hit in sem_search["hits"]["hits"][0:5]:
print("\t{}".format(hit["_source"]["question"]))
print("\n\n========\n")
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.retrieval_precision_metric import (
RetrievalPrecisionMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class RetrievalPrecisionEvaluator(BaseEvaluator):
"""
Tonic Validate's retrieval precision metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = RetrievalPrecisionMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
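# Hedged usage sketch: aevaluate is a coroutine, so it has to be awaited or run
# through asyncio; the OpenAI API key is picked up by tonic_validate's OpenAIService.
#
#   import asyncio
#   evaluator = RetrievalPrecisionEvaluator()
#   result = asyncio.run(
#       evaluator.aevaluate(
#           query="What is the capital of France?",
#           response="Paris is the capital of France.",
#           contexts=["Paris is the capital and largest city of France."],
#       )
#   )
#   print(result.score)  # float between 0.0 and 1.0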
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.retrieval_precision_metric import (
RetrievalPrecisionMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class RetrievalPrecisionEvaluator(BaseEvaluator):
"""Tonic Validate's retrieval precision metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = RetrievalPrecisionMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query, answer=response)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from urllib.parse import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
url: str = SchemaField(description="The URL to scrape the content from")
raw_content: bool = SchemaField(
default=False,
title="Raw Content",
description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
advanced=True,
)
class Output(BlockSchema):
content: str = SchemaField(description="The scraped content from the given URL")
error: str = SchemaField(
description="Error message if the content cannot be retrieved"
)
def __init__(self):
super().__init__(
id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
output_schema=ExtractWebsiteContentBlock.Output,
test_input={
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=("content", "scraped content"),
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
url = input_data.url
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = self.get_request(url, json=False, headers=headers)
yield "content", content
|
from groq._utils._utils import quote
from backend.blocks.jina._auth import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
JinaCredentials,
JinaCredentialsField,
JinaCredentialsInput,
)
from backend.blocks.search import GetRequest
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class SearchTheWebBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
query: str = SchemaField(description="The search query to search the web for")
class Output(BlockSchema):
results: str = SchemaField(
description="The search results including content from top 5 URLs"
)
error: str = SchemaField(description="Error message if the search fails")
def __init__(self):
super().__init__(
id="87840993-2053-44b7-8da4-187ad4ee518c",
description="This block searches the internet for the given search query.",
categories={BlockCategory.SEARCH},
input_schema=SearchTheWebBlock.Input,
output_schema=SearchTheWebBlock.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"query": "Artificial Intelligence",
},
test_credentials=TEST_CREDENTIALS,
test_output=("results", "search content"),
test_mock={"get_request": lambda *args, **kwargs: "search content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
# Encode the search query
encoded_query = quote(input_data.query)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
# Prepend the Jina Search URL to the encoded query
jina_search_url = f"https://s.jina.ai/{encoded_query}"
results = self.get_request(jina_search_url, headers=headers, json=False)
# Output the search results
yield "results", results
class ExtractWebsiteContentBlock(Block, GetRequest):
class Input(BlockSchema):
credentials: JinaCredentialsInput = JinaCredentialsField()
url: str = SchemaField(description="The URL to scrape the content from")
raw_content: bool = SchemaField(
default=False,
title="Raw Content",
description="Whether to do a raw scrape of the content or use Jina-ai Reader to scrape the content",
advanced=True,
)
class Output(BlockSchema):
content: str = SchemaField(description="The scraped content from the given URL")
error: str = SchemaField(
description="Error message if the content cannot be retrieved"
)
def __init__(self):
super().__init__(
id="436c3984-57fd-4b85-8e9a-459b356883bd",
description="This block scrapes the content from the given web URL.",
categories={BlockCategory.SEARCH},
input_schema=ExtractWebsiteContentBlock.Input,
output_schema=ExtractWebsiteContentBlock.Output,
test_input={
"url": "https://en.wikipedia.org/wiki/Artificial_intelligence",
"credentials": TEST_CREDENTIALS_INPUT,
},
test_credentials=TEST_CREDENTIALS,
test_output=("content", "scraped content"),
test_mock={"get_request": lambda *args, **kwargs: "scraped content"},
)
def run(
self, input_data: Input, *, credentials: JinaCredentials, **kwargs
) -> BlockOutput:
if input_data.raw_content:
url = input_data.url
headers = {}
else:
url = f"https://r.jina.ai/{input_data.url}"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
content = self.get_request(url, json=False, headers=headers)
yield "content", content
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocumentArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocumentArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocumentArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocumentArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
def _get_stacked_fields(da: 'DocumentArrayStacked') -> List[str]:
"""
Return a list of the field names of a DocumentArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocumentArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocumentArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocumentArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocumentArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.isnan(val).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocumentArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
def _get_stacked_fields(da: 'DocumentArrayStacked') -> List[str]:
"""
Return a list of the field names of a DocumentArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocumentArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
Access the nested Python class defined in the schema. Could be useful for
reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return: the outer type of the given field
"""
return cls.__fields__[field].outer_type_
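# Illustrative example (hypothetical subclass, grounded in the method above):
#
#   class MyDoc(BaseDocument):
#       text: str
#
#   MyDoc._get_field_type('text')  # -> <class 'str'>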
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps
from docarray.base_document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
Access the nested Python class defined in the schema. Could be useful for
reconstruction of a Document during serialization/deserialization.
:param field: name of the field
:return: the outer type of the given field
"""
return cls.__fields__[field].outer_type_
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'dtype,result_type',
[
('int64', 'int64'),
('float64', 'float64'),
('int8', 'int8'),
('double', 'float64'),
],
)
def test_dtype(dtype, result_type):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == result_type
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'dtype,result_type',
[
('int64', 'int64'),
('float64', 'float64'),
('int8', 'int8'),
('double', 'float64'),
],
)
def test_dtype(dtype, result_type):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == result_type
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine similarity to measure their similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine similarity to measure their similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brightly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper',
'MMSeparateDistributedDataParallel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .fully_sharded_distributed import \
MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper',
'MMSeparateDistributedDataParallel'
]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
|
_base_ = './ga-retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './ga_retinanet_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR', 'PolyMomentum',
'PolyParamScheduler', 'ReduceOnPlateauLR', 'ReduceOnPlateauMomentum',
'ReduceOnPlateauParamScheduler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, StepLR, StepMomentum,
StepParamScheduler, _ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'OptimWrapperDict',
'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR', 'PolyMomentum',
'PolyParamScheduler'
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
from typing import Any, Dict
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int, temporal_dim: int = -4):
super().__init__()
self.num_samples = num_samples
self.temporal_dim = temporal_dim
def _transform(self, inpt: datapoints.VideoType, params: Dict[str, Any]) -> datapoints.VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples, temporal_dim=self.temporal_dim)
|
from typing import Any, Dict
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class UniformTemporalSubsample(Transform):
_transformed_types = (features.is_simple_tensor, features.Video)
def __init__(self, num_samples: int, temporal_dim: int = -4):
super().__init__()
self.num_samples = num_samples
self.temporal_dim = temporal_dim
def _transform(self, inpt: features.VideoType, params: Dict[str, Any]) -> features.VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples, temporal_dim=self.temporal_dim)
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[57.375, 57.120, 58.395],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=8,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnext101_32x8d')))
|
"""Milvus reader."""
from typing import Any, Dict, List, Optional
from uuid import uuid4
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MilvusReader(BaseReader):
"""Milvus reader."""
def __init__(
self,
host: str = "localhost",
port: int = 19530,
user: str = "",
password: str = "",
use_secure: bool = False,
):
"""Initialize with parameters."""
import_err_msg = (
"`pymilvus` package not found, please run `pip install pymilvus`"
)
try:
import pymilvus # noqa
except ImportError:
raise ImportError(import_err_msg)
from pymilvus import MilvusException
self.host = host
self.port = port
self.user = user
self.password = password
self.use_secure = use_secure
self.collection = None
self.default_search_params = {
"IVF_FLAT": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "IP", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "IP", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "IP", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "IP", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "IP", "params": {}},
}
try:
self._create_connection_alias()
except MilvusException:
raise
def load_data(
self,
query_vector: List[float],
collection_name: str,
expr: Any = None,
search_params: Optional[dict] = None,
limit: int = 10,
) -> List[Document]:
"""
Load data from Milvus.
Args:
            collection_name (str): Name of the Milvus collection.
            query_vector (List[float]): Query vector.
            expr (Any, optional): Boolean expression used to filter the results.
            search_params (dict, optional): Search parameters. If not provided,
                defaults are derived from the collection's index type.
            limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
from pymilvus import Collection, MilvusException
try:
self.collection = Collection(collection_name, using=self.alias)
except MilvusException:
raise
assert self.collection is not None
try:
self.collection.load()
except MilvusException:
raise
if search_params is None:
search_params = self._create_search_params()
res = self.collection.search(
[query_vector],
"embedding",
param=search_params,
expr=expr,
output_fields=["doc_id", "text"],
limit=limit,
)
documents = []
# TODO: In future append embedding when more efficient
for hit in res[0]:
document = Document(
id_=hit.entity.get("doc_id"),
text=hit.entity.get("text"),
)
documents.append(document)
return documents
def _create_connection_alias(self) -> None:
from pymilvus import connections
self.alias = None
# Attempt to reuse an open connection
for x in connections.list_connections():
addr = connections.get_connection_addr(x[0])
if (
x[1]
and ("address" in addr)
and (addr["address"] == f"{self.host}:{self.port}")
):
self.alias = x[0]
break
        # Connect to the Milvus instance using the connection parameters passed to the constructor
if self.alias is None:
self.alias = uuid4().hex
connections.connect(
alias=self.alias,
host=self.host,
port=self.port,
user=self.user, # type: ignore
password=self.password, # type: ignore
secure=self.use_secure,
)
def _create_search_params(self) -> Dict[str, Any]:
assert self.collection is not None
index = self.collection.indexes[0]._index_params
search_params = self.default_search_params[index["index_type"]]
search_params["metric_type"] = index["metric_type"]
return search_params
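# A hedged usage sketch (not part of the original reader); the host, port,
# collection name and query vector below are placeholders and must match an
# existing Milvus collection with an "embedding" field:
#
#     reader = MilvusReader(host="localhost", port=19530)
#     docs = reader.load_data(
#         query_vector=[0.1, 0.2, 0.3],  # same dimensionality as the collection
#         collection_name="my_collection",
#         limit=5,
#     )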
|
"""Milvus reader."""
from typing import Any, Dict, List, Optional
from uuid import uuid4
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MilvusReader(BaseReader):
"""Milvus reader."""
def __init__(
self,
host: str = "localhost",
port: int = 19530,
user: str = "",
password: str = "",
use_secure: bool = False,
):
"""Initialize with parameters."""
import_err_msg = (
"`pymilvus` package not found, please run `pip install pymilvus`"
)
try:
import pymilvus # noqa
except ImportError:
raise ImportError(import_err_msg)
from pymilvus import MilvusException
self.host = host
self.port = port
self.user = user
self.password = password
self.use_secure = use_secure
self.collection = None
self.default_search_params = {
"IVF_FLAT": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "IP", "params": {"nprobe": 10}},
"IVF_PQ": {"metric_type": "IP", "params": {"nprobe": 10}},
"HNSW": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_FLAT": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_SQ": {"metric_type": "IP", "params": {"ef": 10}},
"RHNSW_PQ": {"metric_type": "IP", "params": {"ef": 10}},
"IVF_HNSW": {"metric_type": "IP", "params": {"nprobe": 10, "ef": 10}},
"ANNOY": {"metric_type": "IP", "params": {"search_k": 10}},
"AUTOINDEX": {"metric_type": "IP", "params": {}},
}
try:
self._create_connection_alias()
except MilvusException:
raise
def load_data(
self,
query_vector: List[float],
collection_name: str,
expr: Any = None,
search_params: Optional[dict] = None,
limit: int = 10,
) -> List[Document]:
"""Load data from Milvus.
Args:
            collection_name (str): Name of the Milvus collection.
            query_vector (List[float]): Query vector.
            expr (Any, optional): Boolean expression used to filter the results.
            search_params (dict, optional): Search parameters. If not provided,
                defaults are derived from the collection's index type.
            limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
from pymilvus import Collection, MilvusException
try:
self.collection = Collection(collection_name, using=self.alias)
except MilvusException:
raise
assert self.collection is not None
try:
self.collection.load()
except MilvusException:
raise
if search_params is None:
search_params = self._create_search_params()
res = self.collection.search(
[query_vector],
"embedding",
param=search_params,
expr=expr,
output_fields=["doc_id", "text"],
limit=limit,
)
documents = []
# TODO: In future append embedding when more efficient
for hit in res[0]:
document = Document(
id_=hit.entity.get("doc_id"),
text=hit.entity.get("text"),
)
documents.append(document)
return documents
def _create_connection_alias(self) -> None:
from pymilvus import connections
self.alias = None
# Attempt to reuse an open connection
for x in connections.list_connections():
addr = connections.get_connection_addr(x[0])
if (
x[1]
and ("address" in addr)
and (addr["address"] == f"{self.host}:{self.port}")
):
self.alias = x[0]
break
        # Connect to the Milvus instance using the connection parameters passed to the constructor
if self.alias is None:
self.alias = uuid4().hex
connections.connect(
alias=self.alias,
host=self.host,
port=self.port,
user=self.user, # type: ignore
password=self.password, # type: ignore
secure=self.use_secure,
)
def _create_search_params(self) -> Dict[str, Any]:
assert self.collection is not None
index = self.collection.indexes[0]._index_params
search_params = self.default_search_params[index["index_type"]]
search_params["metric_type"] = index["metric_type"]
return search_params
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
# TODO: Consider the naming of this class
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder) -> None:
super().__init__()
self.model = model
self.ce_loss = nn.CrossEntropyLoss()
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0]
loss = self.ce_loss(logits, labels)
return loss
|
from __future__ import annotations
import time
from contextlib import ContextDecorator
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
class timer(ContextDecorator):
def __init__(self, name: str) -> None:
self.name = name
def __enter__(self) -> None:
self.start = time.time()
def __exit__(self, exc_type, exc_value, traceback) -> None:
print(f"{self.name} took {time.time() - self.start:.4f} seconds")
# TODO: Bad name, don't 1-1 copy the name from PyTorch
class CrossEntropyLoss(nn.Module):
def __init__(self, model: CrossEncoder) -> None:
super().__init__()
self.model = model
self.ce_loss = nn.CrossEntropyLoss()
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"CrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
# with timer("making pairs"):
pairs = list(zip(inputs[0], inputs[1]))
# with timer("tokenizing"):
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
# with timer("moving to device"):
tokens.to(self.model.device)
# with timer(f"inference (shape: {tokens['input_ids'].shape})"):
logits = self.model(**tokens)[0]
# with timer("calculating loss"):
loss = self.ce_loss(logits, labels)
return loss
|
"""This tool allows agents to generate images using Steamship.
Steamship offers access to different third party image generation APIs
using a single API key.
Today the following models are supported:
- Dall-E
- Stable Diffusion
To use this tool, you must first set the following environment variable:
    STEAMSHIP_API_KEY
"""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.steamship_image_generation.utils import make_image_public
if TYPE_CHECKING:
from steamship import Steamship
class ModelName(str, Enum):
"""Supported Image Models for generation."""
DALL_E = "dall-e"
STABLE_DIFFUSION = "stable-diffusion"
SUPPORTED_IMAGE_SIZES = {
ModelName.DALL_E: ("256x256", "512x512", "1024x1024"),
ModelName.STABLE_DIFFUSION: ("512x512", "768x768"),
}
class SteamshipImageGenerationTool(BaseTool):
"""Tool used to generate images from a text-prompt."""
model_name: ModelName
size: Optional[str] = "512x512"
steamship: Steamship
return_urls: Optional[bool] = False
name: str = "generate_image"
description: str = (
"Useful for when you need to generate an image."
"Input: A detailed text-2-image prompt describing an image"
"Output: the UUID of a generated image"
)
@model_validator(mode="before")
@classmethod
def validate_size(cls, values: Dict) -> Any:
if "size" in values:
size = values["size"]
model_name = values["model_name"]
if size not in SUPPORTED_IMAGE_SIZES[model_name]:
raise RuntimeError(f"size {size} is not supported by {model_name}")
return values
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
steamship_api_key = get_from_dict_or_env(
values, "steamship_api_key", "STEAMSHIP_API_KEY"
)
try:
from steamship import Steamship
except ImportError:
raise ImportError(
"steamship is not installed. "
"Please install it with `pip install steamship`"
)
steamship = Steamship(
api_key=steamship_api_key,
)
values["steamship"] = steamship
if "steamship_api_key" in values:
del values["steamship_api_key"]
return values
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
image_generator = self.steamship.use_plugin(
plugin_handle=self.model_name.value, config={"n": 1, "size": self.size}
)
task = image_generator.generate(text=query, append_output_to_file=True)
task.wait()
blocks = task.output.blocks
if len(blocks) > 0:
if self.return_urls:
return make_image_public(self.steamship, blocks[0])
else:
return blocks[0].id
raise RuntimeError(f"[{self.name}] Tool unable to generate image!")
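# A hedged usage sketch (not part of the original module); it assumes a valid
# STEAMSHIP_API_KEY is set in the environment before the tool is instantiated:
#
#     tool = SteamshipImageGenerationTool(model_name=ModelName.DALL_E)
#     block_id = tool.run("A colorful poster of a rocket launch at dawn")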
|
"""This tool allows agents to generate images using Steamship.
Steamship offers access to different third party image generation APIs
using a single API key.
Today the following models are supported:
- Dall-E
- Stable Diffusion
To use this tool, you must first set the following environment variable:
    STEAMSHIP_API_KEY
"""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.steamship_image_generation.utils import make_image_public
if TYPE_CHECKING:
from steamship import Steamship
class ModelName(str, Enum):
"""Supported Image Models for generation."""
DALL_E = "dall-e"
STABLE_DIFFUSION = "stable-diffusion"
SUPPORTED_IMAGE_SIZES = {
ModelName.DALL_E: ("256x256", "512x512", "1024x1024"),
ModelName.STABLE_DIFFUSION: ("512x512", "768x768"),
}
class SteamshipImageGenerationTool(BaseTool): # type: ignore[override]
"""Tool used to generate images from a text-prompt."""
model_name: ModelName
size: Optional[str] = "512x512"
steamship: Steamship
return_urls: Optional[bool] = False
name: str = "generate_image"
description: str = (
"Useful for when you need to generate an image."
"Input: A detailed text-2-image prompt describing an image"
"Output: the UUID of a generated image"
)
@model_validator(mode="before")
@classmethod
def validate_size(cls, values: Dict) -> Any:
if "size" in values:
size = values["size"]
model_name = values["model_name"]
if size not in SUPPORTED_IMAGE_SIZES[model_name]:
raise RuntimeError(f"size {size} is not supported by {model_name}")
return values
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
steamship_api_key = get_from_dict_or_env(
values, "steamship_api_key", "STEAMSHIP_API_KEY"
)
try:
from steamship import Steamship
except ImportError:
raise ImportError(
"steamship is not installed. "
"Please install it with `pip install steamship`"
)
steamship = Steamship(
api_key=steamship_api_key,
)
values["steamship"] = steamship
if "steamship_api_key" in values:
del values["steamship_api_key"]
return values
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
image_generator = self.steamship.use_plugin(
plugin_handle=self.model_name.value, config={"n": 1, "size": self.size}
)
task = image_generator.generate(text=query, append_output_to_file=True)
task.wait()
blocks = task.output.blocks
if len(blocks) > 0:
if self.return_urls:
return make_image_public(self.steamship, blocks[0])
else:
return blocks[0].id
raise RuntimeError(f"[{self.name}] Tool unable to generate image!")
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import pytest
from jina import Document, Flow
from video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(
on='/test',
inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos],
return_results=True,
)
assert resp[0].docs[0].embedding.shape == (512,)
assert resp[0].docs[1].embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import pytest
from jina import Document, Flow
from ...video_torch_encoder import VideoTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=os.path.join(cur_dir, '../data/kinetics400'), frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
def test_video_torch_encoder(kinects_videos):
f = Flow().add(uses=VideoTorchEncoder)
with f:
resp = f.post(
on='/test',
inputs=[Document(blob=video.detach().numpy()) for video in kinects_videos],
return_results=True,
)
assert resp[0].docs[0].embedding.shape == (512,)
assert resp[0].docs[1].embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
        topk (int | tuple[int], optional): If the predictions in ``topk``
            match the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
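# A hedged usage sketch (not part of the original module). With two samples and
# three classes, top-1 accuracy only counts a prediction as correct when the
# arg-max class equals the target, while top-2 also accepts the second-best class:
#
#     import torch
#     pred = torch.tensor([[0.1, 0.7, 0.2],
#                          [0.2, 0.5, 0.3]])
#     target = torch.tensor([1, 2])
#     accuracy(pred, target, topk=1)       # -> tensor([50.])
#     accuracy(pred, target, topk=(1, 2))  # -> (tensor([50.]), tensor([100.]))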
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
        topk (int | tuple[int], optional): If the predictions in ``topk``
            match the target, the predictions will be regarded as
            correct ones. Defaults to 1.
        thresh (float, optional): If not None, predictions with scores under
            this threshold are considered incorrect. Defaults to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
|
_base_ = './mask-rcnn_r50_fpn_sample1e-3_ms-2x_lvis-v0.5.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self,
port: int = None,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
**kwargs
):
super().__init__(**kwargs)
self.port = port
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
data_preprocessor = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
data_preprocessor=data_preprocessor,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
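Both configs above override the same Mask R-CNN base config; the second uses the older `preprocess_cfg`/`img_scale` keys, the first the newer `data_preprocessor`/`scales` names. As a hedged sketch of how such a config file is usually consumed (assuming MMDetection 3.x with mmengine; the file path and work dir are illustrative assumptions):
from mmengine.config import Config
from mmengine.runner import Runner

# Load the config shown above (path is an assumption) and launch training with it.
cfg = Config.fromfile('configs/mask_rcnn/mask-rcnn_r50-caffe_fpn_1x_coco.py')
cfg.work_dir = './work_dirs/mask-rcnn_r50-caffe_fpn_1x_coco'

runner = Runner.from_cfg(cfg)
runner.train()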
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilbert-base-uncased"
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = "output/training_stsb_simcse-{}-{}-{}".format(
model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
train_samples = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) # Evaluate every 10% of the data
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={"lr": 5e-5},
use_amp=True, # Set to True, if your GPU supports FP16 cores
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Training parameters
model_name = 'distilbert-base-uncased'
train_batch_size = 128
num_epochs = 1
max_seq_length = 32
# Save path to store our model
model_save_path = 'output/training_stsb_simcse-{}-{}-{}'.format(model_name, train_batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Check if dataset exists. If not, download and extract it
sts_dataset_path = 'data/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
# Here we define our SentenceTransformer model
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = 'data/wiki1m_for_simcse.txt'
if not os.path.exists(wikipedia_dataset_path):
util.http_get('https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt', wikipedia_dataset_path)
# train_samples is a list of InputExample objects where we pass the same sentence twice to texts, i.e. texts=[sent, sent]
train_samples = []
with open(wikipedia_dataset_path, 'r', encoding='utf8') as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
if row['split'] == 'dev':
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
elif row['split'] == 'test':
test_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, batch_size=train_batch_size, name='sts-dev')
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=train_batch_size, name='sts-test')
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
evaluation_steps = int(len(train_dataloader) * 0.1) #Evaluate every 10% of the data
logging.info("Training sentences: {}".format(len(train_samples)))
logging.info("Warmup-steps: {}".format(warmup_steps))
logging.info("Performance before training")
dev_evaluator(model)
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=evaluation_steps,
warmup_steps=warmup_steps,
output_path=model_save_path,
optimizer_params={'lr': 5e-5},
use_amp=True #Set to True, if your GPU supports FP16 cores
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator(model, output_path=model_save_path)
|
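Both scripts train in the SimCSE style: every sentence is paired with itself, and MultipleNegativesRankingLoss relies on dropout noise to make the two encodings of the same sentence differ. A minimal sketch of using the saved model afterwards (the model path and sentences are illustrative assumptions):
from sentence_transformers import SentenceTransformer, util

# Path is illustrative; it corresponds to whatever model_save_path was at training time.
model = SentenceTransformer('output/training_stsb_simcse-distilbert-base-uncased-128-2024-01-01_00-00-00')

embeddings = model.encode(
    ['A man is eating pasta.', 'Someone is having a meal.'],
    convert_to_tensor=True,
)
print(util.cos_sim(embeddings[0], embeddings[1]))  # cosine similarity of the two sentences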
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('stream', [True, False])
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol, stream):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
rand_sleep = random.uniform(0.1, 1.3)
time.sleep(rand_sleep)
f = Flow(protocol=protocol).add(uses=ExecutorRandomSleepExecutor, replicas=2)
input_text = [f'ordinal-{i}' for i in range(180)]
input_da = DocumentArray([Document(text=t) for t in input_text])
with f:
for _ in range(5):
result_flow = f.post(
'/', inputs=input_da, request_size=10, results_in_order=True, stream=stream
)
for input, output in zip(input_da, result_flow):
assert input.text == output.text
c = Client(port=f.port, protocol=str(f.protocol))
for _ in range(5):
result_client = c.post(
'/', inputs=input_da, request_size=10, results_in_order=True, stream=stream
)
for input, output in zip(input_da, result_client):
assert input.text == output.text
|
import random
import time
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
@pytest.mark.parametrize('protocol', ['grpc'])
def test_return_order_in_client(protocol):
class ExecutorRandomSleepExecutor(Executor):
@requests
def foo(self, *args, **kwargs):
rand_sleep = random.uniform(0.1, 1.3)
time.sleep(rand_sleep)
f = Flow(protocol=protocol).add(uses=ExecutorRandomSleepExecutor, replicas=2)
input_text = [f'ordinal-{i}' for i in range(180)]
input_da = DocumentArray([Document(text=t) for t in input_text])
with f:
for _ in range(5):
result_flow = f.post(
'/', inputs=input_da, request_size=10, results_in_order=True
)
for input, output in zip(input_da, result_flow):
assert input.text == output.text
c = Client(port=f.port, protocol=str(f.protocol))
for _ in range(5):
result_client = c.post(
'/', inputs=input_da, request_size=10, results_in_order=True
)
for input, output in zip(input_da, result_client):
assert input.text == output.text
|
import inspect
from abc import ABC
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
class VersionedYAMLParser:
"""Flow YAML parser for specific version
Every :class:`VersionedYAMLParser` must implement two methods and one class attribute:
- :meth:`parse`: to load data (in :class:`dict`) into a :class:`BaseFlow` or :class:`BaseExecutor` object
- :meth:`dump`: to dump a :class:`BaseFlow` or :class:`BaseExecutor` object into a :class:`dict`
- :attr:`version`: version number in :class:`str` in format ``MAJOR.[MINOR]``
"""
version = 'legacy' #: the version number this parser designed for
def parse(
self, cls: type, data: Dict, runtime_args: Optional[Dict[str, Any]]
) -> Union['Flow', 'BaseExecutor']:
"""Return the Flow YAML parser given the syntax version number
.. # noqa: DAR401
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
"""
raise NotImplementedError
def dump(self, data: Union['Flow', 'BaseExecutor']) -> Dict:
"""Return the dictionary given a versioned flow object
.. # noqa: DAR401
:param data: versioned flow object
"""
raise NotImplementedError
class BaseLegacyParser(VersionedYAMLParser, ABC):
"""
    BaseLegacyParser for classes that need parameter injection and that will be managed inside a runtime,
for instance, :class:`BaseExecutor` and :class:`BaseGateway`
"""
@staticmethod
def _get_all_arguments(class_):
"""
:param class_: target class from which we want to retrieve arguments
:return: all the arguments of all the classes from which `class_` inherits
"""
def get_class_arguments(class_):
"""
:param class_: the class to check
:return: a list containing the arguments from `class_`
"""
signature = inspect.signature(class_.__init__)
class_arguments = [p.name for p in signature.parameters.values()]
return class_arguments
def accumulate_classes(cls) -> Set[Type]:
"""
:param cls: the class to check
            :return: all the classes from which cls inherits
"""
def _accumulate_classes(c, cs):
cs.append(c)
if cls == object:
return cs
for base in c.__bases__:
_accumulate_classes(base, cs)
return cs
classes = []
_accumulate_classes(cls, classes)
return set(classes)
all_classes = accumulate_classes(class_)
args = list(map(lambda x: get_class_arguments(x), all_classes))
return set(reduce(lambda x, y: x + y, args))
|
import inspect
from abc import ABC
from functools import reduce
from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Type, Union
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.flow.base import Flow
from jina.serve.executors import BaseExecutor
class VersionedYAMLParser:
"""Flow YAML parser for specific version
Every :class:`VersionedYAMLParser` must implement two methods and one class attribute:
- :meth:`parse`: to load data (in :class:`dict`) into a :class:`BaseFlow` or :class:`BaseExecutor` object
- :meth:`dump`: to dump a :class:`BaseFlow` or :class:`BaseExecutor` object into a :class:`dict`
- :attr:`version`: version number in :class:`str` in format ``MAJOR.[MINOR]``
"""
version = 'legacy' #: the version number this parser designed for
def parse(
self, cls: type, data: Dict, runtime_args: Optional[Dict[str, Any]]
) -> Union['Flow', 'BaseExecutor']:
"""Return the Flow YAML parser given the syntax version number
.. # noqa: DAR401
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
"""
raise NotImplementedError
def dump(self, data: Union['Flow', 'BaseExecutor']) -> Dict:
"""Return the dictionary given a versioned flow object
.. # noqa: DAR401
:param data: versioned flow object
"""
raise NotImplementedError
class BaseLegacyParser(VersionedYAMLParser, ABC):
"""
    BaseLegacyParser for classes that need parameter injection and that will be managed inside a runtime,
for instance, :class:`BaseExecutor` and :class:`BaseGateway`
"""
@staticmethod
def _get_all_arguments(class_):
"""
:param class_: target class from which we want to retrieve arguments
:return: all the arguments of all the classes from which `class_` inherits
"""
def get_class_arguments(class_):
"""
:param class_: the class to check
:return: a list containing the arguments from `class_`
"""
signature = inspect.signature(class_.__init__)
class_arguments = [p.name for p in signature.parameters.values()]
return class_arguments
def accumulate_classes(cls) -> Set[Type]:
"""
:param cls: the class to check
            :return: all the classes from which cls inherits
"""
def _accumulate_classes(c, cs):
cs.append(c)
if cls == object:
return cs
for base in c.__bases__:
_accumulate_classes(base, cs)
return cs
classes = []
_accumulate_classes(cls, classes)
return set(classes)
all_classes = accumulate_classes(class_)
args = list(map(lambda x: get_class_arguments(x), all_classes))
return set(reduce(lambda x, y: x + y, args))
|
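A small, self-contained check (not part of the source) of what `BaseLegacyParser._get_all_arguments` gathers for a toy hierarchy; it walks every base class and unions their `__init__` parameter names:
class _ToyBase:
    def __init__(self, alpha: int = 1, **kwargs):
        self.alpha = alpha


class _ToyChild(_ToyBase):
    def __init__(self, beta: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.beta = beta


# Includes 'alpha', 'beta' and 'kwargs' from both levels (plus 'self'/'args' picked up
# from object.__init__, since object is also part of the accumulated class set).
print(BaseLegacyParser._get_all_arguments(_ToyChild))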
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the ema parameters.
            The ema parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            anneal it to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.lerp_(source_param, momentum)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the ema parameters.
            The ema parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            anneal it to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
|
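The only functional difference in the pair above is the in-place update: `lerp_` versus the explicit `mul_`/`add_` form. An illustrative check (not from the source) that the two are numerically equivalent, plus a look at how the decayed momentum shrinks with the step count:
import math

import torch

momentum, gamma = 0.0002, 2000
for steps in (0, 1000, 10000):
    decayed = (1 - momentum) * math.exp(-float(1 + steps) / gamma) + momentum
    print(steps, round(decayed, 6))  # large early in training, approaching 0.0002 later

avg, src, m = torch.ones(4), torch.zeros(4), 0.1
# lerp_(src, m) computes (1 - m) * avg + m * src in place, matching the second variant.
assert torch.allclose(avg.clone().lerp_(src, m),
                      avg.clone().mul_(1 - m).add_(src, alpha=m))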
import copy
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
            was already extracted, re-extract the archive and overwrite the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
<Deprecated version="2.14.0">
`use_auth_token` was deprecated in favor of `token` in version 2.14.0 and will be removed in 3.0.0.
</Deprecated>
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
            A description to be displayed alongside the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
token: Optional[Union[str, bool]] = None
use_auth_token = "deprecated"
ignore_url_params: bool = False
storage_options: Optional[Dict] = None
download_desc: Optional[str] = None
def __post_init__(self):
if self.use_auth_token != "deprecated":
warnings.warn(
"'use_auth_token' was deprecated in favor of 'token' in version 2.14.0 and will be removed in 3.0.0.\n"
f"You can remove this warning by passing 'token={self.use_auth_token}' instead.",
FutureWarning,
)
self.token = self.use_auth_token
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager.
Attributes:
cache_dir (`str` or `Path`, *optional*):
Specify a cache directory to save the file to (overwrite the
default cache dir).
force_download (`bool`, defaults to `False`):
            If `True`, re-download the file even if it's already cached in
the cache dir.
resume_download (`bool`, defaults to `False`):
If `True`, resume the download if an incompletely received file is
found.
proxies (`dict`, *optional*):
user_agent (`str`, *optional*):
Optional string or dict that will be appended to the user-agent on remote
requests.
extract_compressed_file (`bool`, defaults to `False`):
            If `True` and the path points to a zip or tar file,
extract the compressed file in a folder along the archive.
force_extract (`bool`, defaults to `False`):
If `True` when `extract_compressed_file` is `True` and the archive
            was already extracted, re-extract the archive and overwrite the folder where it was extracted.
delete_extracted (`bool`, defaults to `False`):
Whether to delete (or keep) the extracted files.
use_etag (`bool`, defaults to `True`):
Whether to use the ETag HTTP response header to validate the cached files.
num_proc (`int`, *optional*):
The number of processes to launch to download the files in parallel.
        max_retries (`int`, defaults to `1`):
The number of times to retry an HTTP request if it fails.
use_auth_token (`str` or `bool`, *optional*):
Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If `True`, or not specified, will get token from `~/.huggingface`.
ignore_url_params (`bool`, defaults to `False`):
Whether to strip all query parameters and fragments from
the download URL before using it for caching the file.
storage_options (`dict`, *optional*):
Key/value pairs to be passed on to the dataset file-system backend, if any.
download_desc (`str`, *optional*):
            A description to be displayed alongside the progress bar while downloading the files.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
delete_extracted: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
ignore_url_params: bool = False
storage_options: Optional[Dict] = None
download_desc: Optional[str] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
|
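A minimal usage sketch for the class above: a `DownloadConfig` is typically built once and handed to `datasets.load_dataset`, which forwards it to the download manager (the dataset name and values below are illustrative assumptions):
from datasets import DownloadConfig, load_dataset

download_config = DownloadConfig(
    cache_dir='./hf_cache',  # illustrative cache location
    max_retries=3,
    num_proc=4,
)
dataset = load_dataset('imdb', download_config=download_config)
print(dataset)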
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmengine.utils import mkdir_or_exist
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--out-dir', default=None, help='Dir to output file')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
raise NotImplementedError()
model = init_detector(
cfg, checkpoint, palette=args.palette, device=args.device)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show or args.out_dir is not None:
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mkdir_or_exist(out_dir)
out_file = osp.join(
out_dir,
config_name.split('/')[-1].replace('py', 'jpg'))
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=args.show,
wait_time=args.wait_time,
out_file=out_file,
pred_score_thr=args.score_thr)
return result
# Sample test of whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
config = Config.fromfile(args.config)
# init visualizer
visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
visualizer = VISUALIZERS.build(visualizer_cfg)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test_image.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args,
logger)
except Exception as e:
            logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os.path as osp
from argparse import ArgumentParser
import mmcv
from mmengine.config import Config
from mmengine.logging import MMLogger
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
parser.add_argument('--img', default='demo/demo.jpg', help='Image file')
parser.add_argument('--aug', action='store_true', help='aug test')
parser.add_argument('--model-name', help='model name to inference')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--out-dir', default=None, help='Dir to output file')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
return args
def inference_model(config_name, checkpoint, visualizer, args, logger=None):
cfg = Config.fromfile(config_name)
if args.aug:
raise NotImplementedError()
model = init_detector(
cfg, checkpoint, palette=args.palette, device=args.device)
visualizer.dataset_meta = model.dataset_meta
# test a single image
result = inference_detector(model, args.img)
# show the results
if args.show or args.out_dir is not None:
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
out_file = None
if args.out_dir is not None:
out_dir = args.out_dir
mmcv.mkdir_or_exist(out_dir)
out_file = osp.join(
out_dir,
config_name.split('/')[-1].replace('py', 'jpg'))
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=args.show,
wait_time=args.wait_time,
out_file=out_file,
pred_score_thr=args.score_thr)
return result
# Sample test of whether the inference code is correct
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
config = Config.fromfile(args.config)
# init visualizer
visualizer_cfg = dict(type='DetLocalVisualizer', name='visualizer')
visualizer = VISUALIZERS.build(visualizer_cfg)
# test single model
if args.model_name:
if args.model_name in config:
model_infos = config[args.model_name]
if not isinstance(model_infos, list):
model_infos = [model_infos]
model_info = model_infos[0]
config_name = model_info['config'].strip()
print(f'processing: {config_name}', flush=True)
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args)
return
else:
raise RuntimeError('model name input error.')
# test all model
logger = MMLogger.get_instance(
name='MMLogger',
log_file='benchmark_test_image.log',
log_level=logging.ERROR)
for model_key in config:
model_infos = config[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'], flush=True)
config_name = model_info['config'].strip()
checkpoint = osp.join(args.checkpoint_root,
model_info['checkpoint'].strip())
try:
# build the model from a config file and a checkpoint file
inference_model(config_name, checkpoint, visualizer, args,
logger)
except Exception as e:
            logger.error(f'{config_name}: {repr(e)}')
if __name__ == '__main__':
args = parse_args()
main(args)
|
from datetime import datetime
import pytest
from langchain_core.exceptions import OutputParserException
from langchain.output_parsers.datetime import DatetimeOutputParser
def test_datetime_output_parser_parse() -> None:
parser = DatetimeOutputParser()
# Test valid input
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert result == date
# Test valid input
parser.format = "%Y-%m-%dT%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.year == date.year
and result.month == date.month
and result.day == date.day
and result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
# Test valid input
parser.format = "%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
# Test invalid input
with pytest.raises(OutputParserException):
parser.parse("Invalid date string")
|
from datetime import datetime
from time import sleep
from langchain.output_parsers.datetime import DatetimeOutputParser
def test_datetime_output_parser_parse() -> None:
parser = DatetimeOutputParser()
# Test valid input
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert result == date
# Test valid input
parser.format = "%Y-%m-%dT%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.year == date.year
and result.month == date.month
and result.day == date.day
and result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
# Test valid input
parser.format = "%H:%M:%S"
date = datetime.now()
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert (
result.hour == date.hour
and result.minute == date.minute
and result.second == date.second
)
# Test invalid input
try:
sleep(0.001)
datestr = date.strftime(parser.format)
result = parser.parse(datestr)
assert result == date
assert False, "Should have raised AssertionError"
except AssertionError:
pass
|
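Outside of the tests above, the parser is normally paired with a prompt. An illustrative sketch, assuming the default `%Y-%m-%dT%H:%M:%S.%fZ` format string and the standard `get_format_instructions` helper:
from langchain.output_parsers.datetime import DatetimeOutputParser

parser = DatetimeOutputParser()
# Instructions appended to a prompt so the LLM answers in the expected format.
print(parser.get_format_instructions())
# Parse a response that follows the default format back into a datetime object.
print(parser.parse('2023-07-04T12:30:00.000000Z'))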
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implementation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
    assert issubclass(cls, _Union), f"{cls} must inherit from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
# mypy: allow-untyped-defs
import functools
from collections.abc import Hashable
from dataclasses import dataclass, fields
from typing import TypeVar
from typing_extensions import dataclass_transform
T = TypeVar("T", bound="_Union")
class _UnionTag(str):
__slots__ = ("_cls",)
_cls: Hashable
@staticmethod
def create(t, cls):
tag = _UnionTag(t)
assert not hasattr(tag, "_cls")
tag._cls = cls
return tag
def __eq__(self, cmp) -> bool:
assert isinstance(cmp, str)
other = str(cmp)
assert other in _get_field_names(self._cls), (
f"{other} is not a valid tag for {self._cls}. Available tags: {_get_field_names(self._cls)}"
)
return str(self) == other
def __hash__(self):
return hash(str(self))
@functools.cache
def _get_field_names(cls) -> set[str]:
return {f.name for f in fields(cls)}
# If you turn a schema class that inherits from union into a dataclass, please use
# this decorator to configure it. It's safe, faster and allows code sharing.
#
# For example, _union_dataclass customizes the __eq__ method to only check the type
# and value property instead of default implementation of dataclass which goes
# through every field in the dataclass.
@dataclass_transform(eq_default=False)
def _union_dataclass(cls: type[T]) -> type[T]:
    assert issubclass(cls, _Union), f"{cls} must inherit from {_Union}."
return dataclass(repr=False, eq=False)(cls)
class _Union:
_type: _UnionTag
@classmethod
def create(cls, **kwargs):
assert len(kwargs) == 1
obj = cls(**{**{f.name: None for f in fields(cls)}, **kwargs}) # type: ignore[arg-type]
obj._type = _UnionTag.create(next(iter(kwargs.keys())), cls)
return obj
def __post_init__(self):
assert not any(
f.name in ("type", "_type", "create", "value")
for f in fields(self) # type: ignore[arg-type, misc]
)
@property
def type(self) -> str:
try:
return self._type
except AttributeError as e:
raise RuntimeError(
f"Please use {type(self).__name__}.create to instantiate the union type."
) from e
@property
def value(self):
return getattr(self, self.type)
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if attr is None and name in _get_field_names(type(self)) and name != self.type: # type: ignore[arg-type]
raise AttributeError(f"Field {name} is not set.")
return attr
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Union):
return False
return self.type == other.type and self.value == other.value
def __str__(self):
return self.__repr__()
def __repr__(self):
return f"{type(self).__name__}({self.type}={getattr(self, self.type)})"
|
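An illustrative example (not part of the source) of how a schema class built on `_Union` is meant to be used: decorate it with `_union_dataclass`, populate exactly one field through `create`, and read the tag back via `.type`/`.value`:
@_union_dataclass
class _Payload(_Union):
    # Exactly one of these is set per instance; the field names are hypothetical.
    as_int: int = None
    as_str: str = None


p = _Payload.create(as_int=3)
assert p.type == 'as_int' and p.value == 3
print(p)  # _Payload(as_int=3)
print(p == _Payload.create(as_int=3))  # True: __eq__ compares only the tag and its value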
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Food101(VisionDataset):
"""`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.
The Food-101 is a challenging data set of 101 food categories with 101,000 images.
For each class, 250 manually reviewed test images are provided as well as 750 training images.
On purpose, the training images were not cleaned, and thus still contain some amount of noise.
This comes mostly in the form of intense colors and sometimes wrong labels. All images were
rescaled to have a maximum side length of 512 pixels.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
_MD5 = "85eeb15f3717b99a5da872d97d918f87"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = Path(self.root) / "food-101"
self._meta_folder = self._base_folder / "meta"
self._images_folder = self._base_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._labels = []
self._image_files = []
with open(self._meta_folder / f"{split}.json") as f:
metadata = json.loads(f.read())
self.classes = sorted(metadata.keys())
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
for class_label, im_rel_paths in metadata.items():
self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
self._image_files += [
self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Food101(VisionDataset):
"""`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.
The Food-101 is a challenging data set of 101 food categories with 101,000 images.
For each class, 250 manually reviewed test images are provided as well as 750 training images.
On purpose, the training images were not cleaned, and thus still contain some amount of noise.
This comes mostly in the form of intense colors and sometimes wrong labels. All images were
rescaled to have a maximum side length of 512 pixels.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
_MD5 = "85eeb15f3717b99a5da872d97d918f87"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = Path(self.root) / "food-101"
self._meta_folder = self._base_folder / "meta"
self._images_folder = self._base_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._labels = []
self._image_files = []
with open(self._meta_folder / f"{split}.json") as f:
metadata = json.loads(f.read())
self.classes = sorted(metadata.keys())
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
for class_label, im_rel_paths in metadata.items():
self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
self._image_files += [
self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
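A minimal usage sketch (the root path and transform are illustrative assumptions) for the dataset class above, wiring it into a torchvision transform and a DataLoader:
from torch.utils.data import DataLoader
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

# download=True fetches and extracts the roughly 5 GB archive on first use.
train_set = Food101(root='data', split='train', transform=transform, download=True)
train_loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # e.g. torch.Size([32, 3, 224, 224]) and torch.Size([32])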
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=64,
batch_augments=[dict(type='BatchFixedSizePad', size=(640, 640))]),
backbone=dict(norm_eval=False),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(640, 640), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 50e
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# So we create the respective sentence combinations
sentence_combinations = [[query, corpus_sentence] for corpus_sentence in corpus]
# Compute the similarity scores for these combinations
similarity_scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order
sim_scores_argsort = reversed(np.argsort(similarity_scores))
# Print the scores
print("Query:", query)
for idx in sim_scores_argsort:
print("{:.2f}\t{}".format(similarity_scores[idx], corpus[idx]))
|
import importlib
import shutil
import threading
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
    Preprocesses `dataset_path` by removing the remote filesystem prefix (e.g. `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
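    Example:
        `extract_path_from_uri("s3://my-bucket/dataset/train")` returns `"my-bucket/dataset/train"`,
        while a purely local path such as `"dataset/train"` is returned unchanged.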
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
    Checks whether the filesystem uses a remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
"""
Renames the file `src` in `fs` to `dst`.
"""
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise, HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
import importlib
import threading
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
fsspec.register_implementation(fs_class.protocol, fs_class)
def extract_path_from_uri(dataset_path: str) -> str:
"""
    Preprocesses `dataset_path` by removing the remote filesystem prefix (e.g. `s3://`).
Args:
dataset_path (`str`):
Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory.
"""
if "://" in dataset_path:
dataset_path = dataset_path.split("://")[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
"""
    Checks whether the filesystem uses a remote protocol.
Args:
fs (`fsspec.spec.AbstractFileSystem`):
An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`].
"""
if fs is not None and fs.protocol != "file":
return True
else:
return False
def _reset_fsspec_lock() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise, HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
from typing import List
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 128.
max_gen_len (int, optional): The maximum length of generated sequences. Defaults to 64.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 4.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts: List[str] = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
        # Few-shot prompt (providing a few examples before asking the model to complete more)
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
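# A typical invocation sketch (the script name and checkpoint paths below are placeholders,
# not taken from this file):
#   torchrun --nproc_per_node 1 example_text_completion.py \
#       --ckpt_dir llama-2-7b/ --tokenizer_path tokenizer.model \
#       --max_seq_len 128 --max_batch_size 4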
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 128.
max_gen_len (int, optional): The maximum length of generated sequences. Defaults to 64.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 4.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
        # Few-shot prompt (providing a few examples before asking the model to complete more)
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import perspective_transform
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.image import affine_transform
from keras.src.ops.image import crop_images
from keras.src.ops.image import extract_patches
from keras.src.ops.image import hsv_to_rgb
from keras.src.ops.image import map_coordinates
from keras.src.ops.image import pad_images
from keras.src.ops.image import resize
from keras.src.ops.image import rgb_to_grayscale
from keras.src.ops.image import rgb_to_hsv
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor.utils import add_graph_execution_async
from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
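    """Collect `{name: output_value}` pairs from node results whose output data contains both an "output" and a "name"."""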
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution_async(
graph_id=graph_id,
user_id=api_key.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.server.external.middleware import require_permission
from backend.server.routers import v1 as internal_api_routes
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await internal_api_routes.execute_graph(
graph_id=graph_id,
node_input=node_input,
user_id=api_key.user_id,
graph_version=graph_version,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
preprocess_cfg=preprocess_cfg,
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
# model settings
norm_cfg = dict(type='BN', requires_grad=False)
model = dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
out_indices=(3, ),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
type='RPNHead',
in_channels=2048,
feat_channels=2048,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=2048,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=2048,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms=dict(type='nms', iou_threshold=0.7),
nms_pre=6000,
max_per_img=1000,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)))
|
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
|
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
|
from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_milvus_expr,
_batch_list,
)
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
def _get_doc_by_id(self, _id: str) -> 'Document':
# to be implemented
return self._get_docs_by_ids([_id])[0]
def _del_doc_by_id(self, _id: str):
# to be implemented
self._del_docs_by_ids([_id])
def _set_doc_by_id(self, _id: str, value: 'Document', **kwargs):
# to be implemented
self._set_docs_by_ids([_id], [value], None, **kwargs)
def _load_offset2ids(self):
if self._list_like:
collection = self._offset2id_collection
kwargs = self._update_kwargs_from_config('consistency_level', **dict())
with self.loaded_collection(collection):
res = collection.query(
expr=_always_true_expr('document_id'),
output_fields=['offset', 'document_id'],
**kwargs,
)
sorted_res = sorted(res, key=lambda k: int(k['offset']))
self._offset2ids = Offset2ID([r['document_id'] for r in sorted_res])
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
# delete old entries
self._clear_offset2ids_milvus()
# insert current entries
ids = self._offset2ids.ids
if not ids:
return
offsets = [str(i) for i in range(len(ids))]
dummy_vectors = [np.zeros(1) for _ in range(len(ids))]
collection = self._offset2id_collection
collection.insert([offsets, ids, dummy_vectors])
def _get_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
if not ids:
return DocumentArray()
ids = list(ids)
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
with self.loaded_collection():
docs = DocumentArray()
for id_batch in _batch_list(ids, kwargs['batch_size']):
res = self._collection.query(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
output_fields=['serialized'],
**kwargs,
)
if not res:
raise KeyError(f'No documents found for ids {ids}')
docs.extend(self._docs_from_query_response(res))
# sort output docs according to input id sorting
return DocumentArray([docs[d] for d in ids])
def _del_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}', **kwargs
)
def _set_docs_by_ids(
self, ids, docs: 'Iterable[Document]', mismatch_ids: 'Dict', **kwargs
):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
# delete old entries
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
**kwargs,
)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
# insert new entries
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
def _clear_storage(self):
self._collection.drop()
self._create_or_reuse_collection()
self._clear_offset2ids_milvus()
def _clear_offset2ids_milvus(self):
self._offset2id_collection.drop()
self._create_or_reuse_offset2id_collection()
|
from typing import Iterable, Dict, TYPE_CHECKING
import numpy as np
from docarray import DocumentArray
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.storage.milvus.backend import (
_always_true_expr,
_ids_to_milvus_expr,
_batch_list,
)
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
def _get_doc_by_id(self, _id: str) -> 'Document':
# to be implemented
return self._get_docs_by_ids([_id])[0]
def _del_doc_by_id(self, _id: str):
# to be implemented
self._del_docs_by_ids([_id])
def _set_doc_by_id(self, _id: str, value: 'Document', **kwargs):
# to be implemented
self._set_docs_by_ids([_id], [value], None, **kwargs)
def _load_offset2ids(self):
if self._list_like:
collection = self._offset2id_collection
kwargs = self._update_kwargs_from_config('consistency_level', **dict())
with self.loaded_collection(collection):
res = collection.query(
expr=_always_true_expr('document_id'),
output_fields=['offset', 'document_id'],
**kwargs,
)
sorted_res = sorted(res, key=lambda k: int(k['offset']))
self._offset2ids = Offset2ID([r['document_id'] for r in sorted_res])
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
# delete old entries
self._clear_offset2ids_milvus()
# insert current entries
ids = self._offset2ids.ids
if not ids:
return
offsets = [str(i) for i in range(len(ids))]
dummy_vectors = [np.zeros(1) for _ in range(len(ids))]
collection = self._offset2id_collection
collection.insert([offsets, ids, dummy_vectors])
def _get_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
if not ids:
return DocumentArray()
ids = list(ids)
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
with self.loaded_collection():
docs = DocumentArray()
for id_batch in _batch_list(ids, kwargs['batch_size']):
res = self._collection.query(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
output_fields=['serialized'],
**kwargs,
)
if not res:
raise KeyError(f'No documents found for ids {ids}')
docs.extend(self._docs_from_query_response(res))
# sort output docs according to input id sorting
id_to_index = {id_: i for i, id_ in enumerate(ids)}
return DocumentArray(sorted(docs, key=lambda d: id_to_index[d.id]))
def _del_docs_by_ids(self, ids: 'Iterable[str]', **kwargs) -> 'DocumentArray':
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}', **kwargs
)
def _set_docs_by_ids(
self, ids, docs: 'Iterable[Document]', mismatch_ids: 'Dict', **kwargs
):
kwargs = self._update_kwargs_from_config('consistency_level', **kwargs)
kwargs = self._update_kwargs_from_config('batch_size', **kwargs)
# delete old entries
for id_batch in _batch_list(list(ids), kwargs['batch_size']):
self._collection.delete(
expr=f'document_id in {_ids_to_milvus_expr(id_batch)}',
**kwargs,
)
for docs_batch in _batch_list(list(docs), kwargs['batch_size']):
# insert new entries
payload = self._docs_to_milvus_payload(docs_batch)
self._collection.insert(payload, **kwargs)
def _clear_storage(self):
self._collection.drop()
self._create_or_reuse_collection()
self._clear_offset2ids_milvus()
def _clear_offset2ids_milvus(self):
self._offset2id_collection.drop()
self._create_or_reuse_offset2id_collection()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding,
SinePositionalEncoding3D)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder',
'SinePositionalEncoding3D'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .activations import SiLU
from .bbox_nms import fast_nms, multiclass_nms
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .dropblock import DropBlock
from .ema import ExpMomentumEMA
from .inverted_residual import InvertedResidual
from .matrix_nms import mask_matrix_nms
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .normed_predictor import NormedConv2d, NormedLinear
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import ChannelAttention, DyReLU, SELayer
# yapf: disable
from .transformer import (MLP, AdaptivePadding, CdnQueryGenerator,
ConditionalAttention,
ConditionalDetrTransformerDecoder,
ConditionalDetrTransformerDecoderLayer,
DABDetrTransformerDecoder,
DABDetrTransformerDecoderLayer,
DABDetrTransformerEncoder,
DeformableDetrTransformerDecoder,
DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder,
DeformableDetrTransformerEncoderLayer,
DetrTransformerDecoder, DetrTransformerDecoderLayer,
DetrTransformerEncoder, DetrTransformerEncoderLayer,
DinoTransformerDecoder, DynamicConv,
Mask2FormerTransformerDecoder,
Mask2FormerTransformerDecoderLayer,
Mask2FormerTransformerEncoder, PatchEmbed,
PatchMerging, coordinate_to_encoding,
inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
# yapf: enable
__all__ = [
'fast_nms', 'multiclass_nms', 'mask_matrix_nms', 'DropBlock',
'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder', 'ResLayer', 'PatchMerging',
'SinePositionalEncoding', 'LearnedPositionalEncoding', 'DynamicConv',
'SimplifiedBasicBlock', 'NormedLinear', 'NormedConv2d', 'InvertedResidual',
'SELayer', 'ConvUpsample', 'CSPLayer', 'adaptive_avg_pool2d',
'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc', 'nlc_to_nchw', 'DyReLU',
'ExpMomentumEMA', 'inverse_sigmoid', 'ChannelAttention', 'SiLU', 'MLP',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer', 'AdaptivePadding',
'coordinate_to_encoding', 'ConditionalAttention',
'DABDetrTransformerDecoderLayer', 'DABDetrTransformerDecoder',
'DABDetrTransformerEncoder', 'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator', 'Mask2FormerTransformerEncoder',
'Mask2FormerTransformerDecoderLayer', 'Mask2FormerTransformerDecoder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
from .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler
__all__ = [
'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',
'InfiniteGroupBatchSampler', 'InfiniteBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import torch
class AttnProcsLayers(torch.nn.Module):
def __init__(self, state_dict: Dict[str, torch.Tensor]):
super().__init__()
self.layers = torch.nn.ModuleList(state_dict.values())
self.mapping = dict(enumerate(state_dict.keys()))
self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
# .processor for unet, .self_attn for text encoder
self.split_keys = [".processor", ".self_attn"]
# we add a hook to state_dict() and load_state_dict() so that the
# naming fits with `unet.attn_processors`
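        # e.g. on save, a parameter stored as "layers.0.<param_name>" is renamed back to
        # "<original_processor_key>.<param_name>" via `self.mapping`, while `map_from` applies
        # the reverse renaming on load (the placeholder names here are illustrative)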
def map_to(module, state_dict, *args, **kwargs):
new_state_dict = {}
for key, value in state_dict.items():
num = int(key.split(".")[1]) # 0 is always "layers"
new_key = key.replace(f"layers.{num}", module.mapping[num])
new_state_dict[new_key] = value
return new_state_dict
def remap_key(key, state_dict):
for k in self.split_keys:
if k in key:
return key.split(k)[0] + k
raise ValueError(
f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
)
def map_from(module, state_dict, *args, **kwargs):
all_keys = list(state_dict.keys())
for key in all_keys:
replace_key = remap_key(key, state_dict)
new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
state_dict[new_key] = state_dict[key]
del state_dict[key]
self._register_state_dict_hook(map_to)
self._register_load_state_dict_pre_hook(map_from, with_module=True)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import torch
class AttnProcsLayers(torch.nn.Module):
def __init__(self, state_dict: Dict[str, torch.Tensor]):
super().__init__()
self.layers = torch.nn.ModuleList(state_dict.values())
self.mapping = dict(enumerate(state_dict.keys()))
self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
# .processor for unet, .self_attn for text encoder
self.split_keys = [".processor", ".self_attn"]
# we add a hook to state_dict() and load_state_dict() so that the
# naming fits with `unet.attn_processors`
def map_to(module, state_dict, *args, **kwargs):
new_state_dict = {}
for key, value in state_dict.items():
num = int(key.split(".")[1]) # 0 is always "layers"
new_key = key.replace(f"layers.{num}", module.mapping[num])
new_state_dict[new_key] = value
return new_state_dict
def remap_key(key, state_dict):
for k in self.split_keys:
if k in key:
return key.split(k)[0] + k
raise ValueError(
f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
)
def map_from(module, state_dict, *args, **kwargs):
all_keys = list(state_dict.keys())
for key in all_keys:
replace_key = remap_key(key, state_dict)
new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
state_dict[new_key] = state_dict[key]
del state_dict[key]
self._register_state_dict_hook(map_to)
self._register_load_state_dict_pre_hook(map_from, with_module=True)
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
For each query, this script outputs the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example documents
corpus = [
"Machine learning is a field of study that gives computers the ability to learn without being explicitly programmed.",
"Deep learning is part of a broader family of machine learning methods based on artificial neural networks with representation learning.",
"Neural networks are computing systems vaguely inspired by the biological neural networks that constitute animal brains.",
"Mars rovers are robotic vehicles designed to travel on the surface of Mars to collect data and perform experiments.",
"The James Webb Space Telescope is the largest optical telescope in space, designed to conduct infrared astronomy.",
"SpaceX's Starship is designed to be a fully reusable transportation system capable of carrying humans to Mars and beyond.",
"Global warming is the long-term heating of Earth's climate system observed since the pre-industrial period due to human activities.",
"Renewable energy sources include solar, wind, hydro, and geothermal power that naturally replenish over time.",
"Carbon capture technologies aim to collect CO2 emissions before they enter the atmosphere and store them underground.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode_document(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"How do artificial neural networks work?",
"What technology is used for modern space exploration?",
"How can we address climate change challenges?",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode_query(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(f"(Score: {score:.4f})", corpus[idx])
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
For each query, this script outputs the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], f"(Score: {score:.4f})")
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments`, which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset column names to Router routes, like "query" or "document". This is used to specify
which Router submodule to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of column names to routes.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter name regular expressions to learning rates. This allows you to set different
learning rates for different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is
useful when you want to fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments`, which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False))))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
rpn_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
bbox_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=2,
aligned=False)),
bbox_head=dict(
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
roi_layer=dict(
type='RoIAlign',
output_size=14,
sampling_ratio=2,
aligned=False))))
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import VQModel
from diffusers.utils.testing_utils import (
backend_manual_seed,
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = VQModel
main_input_name = "sample"
@property
def dummy_input(self, sizes=(32, 32)):
batch_size = 4
num_channels = 3
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [8, 16],
"norm_num_groups": 8,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("Test not supported.")
def test_forward_signature(self):
pass
@unittest.skip("Test not supported.")
def test_training(self):
pass
def test_from_pretrained_hub(self):
model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
self.assertIsNotNone(model)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(torch_device)
image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def test_output_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_loss_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).commit_loss.cpu()
# fmt: off
expected_output = torch.tensor([0.1936])
# fmt: on
self.assertTrue(torch.allclose(output, expected_output, atol=1e-3))
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import VQModel
from diffusers.utils.testing_utils import (
backend_manual_seed,
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = VQModel
main_input_name = "sample"
@property
def dummy_input(self, sizes=(32, 32)):
batch_size = 4
num_channels = 3
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
return {"sample": image}
@property
def input_shape(self):
return (3, 32, 32)
@property
def output_shape(self):
return (3, 32, 32)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": [8, 16],
"norm_num_groups": 8,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
@unittest.skip("Test not supported.")
def test_forward_signature(self):
pass
@unittest.skip("Test not supported.")
def test_training(self):
pass
def test_from_pretrained_hub(self):
model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
self.assertIsNotNone(model)
self.assertEqual(len(loading_info["missing_keys"]), 0)
model.to(torch_device)
image = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def test_output_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).sample
output_slice = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
# fmt: on
self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
def test_loss_pretrained(self):
model = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(torch_device).eval()
torch.manual_seed(0)
backend_manual_seed(torch_device, 0)
image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
image = image.to(torch_device)
with torch.no_grad():
output = model(image).commit_loss.cpu()
# fmt: off
expected_output = torch.tensor([0.1936])
# fmt: on
self.assertTrue(torch.allclose(output, expected_output, atol=1e-3))
|
_base_ = './solov2_r50_fpn_ms-3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
|
_base_ = 'solov2_r50_fpn_mstrain_3x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(checkpoint='torchvision://resnet101'),
dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
mask_head=dict(
mask_feature_head=dict(conv_cfg=dict(type='DCNv2')),
dcn_cfg=dict(type='DCNv2'),
dcn_apply_to_all_conv=True))
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_32xb2-ssj-scp-270k_coco.py' # noqa
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
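# i.e. 90000 * 0.9 = 81000, 90000 * 0.95 = 85500 and 90000 * 0.975 = 87750,
# which are the `milestones` used below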
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[81000, 85500, 87750],
gamma=0.1)
]
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# training schedule for 90k
max_iters = 90000
# learning rate policy
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=90000,
by_epoch=False,
milestones=[81000, 85500, 87750],
gamma=0.1)
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from dpr_reader import DPRReaderRanker
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize('request_size', [1, 8, 50])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(
text='just some random text here',
matches=[Document(text='random text', tags={'title': 'random title'})],
)
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=DPRReaderRanker) as flow:
resp = flow.post(
on='/search',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...dpr_reader import DPRReaderRanker
@pytest.mark.parametrize('request_size', [1, 8, 50])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(
text='just some random text here',
matches=[Document(text='random text', tags={'title': 'random title'})],
)
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=DPRReaderRanker) as flow:
resp = flow.post(
on='/search',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GENERIC_WEBHOOK = "generic_webhook"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
APOLLO = "apollo"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SCREENSHOTONE = "screenshotone"
SLANT3D = "slant3d"
SMARTLEAD = "smartlead"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
ZEROBOUNCE = "zerobounce"
# --8<-- [end:ProviderName]
|
from __future__ import annotations
__version__ = "3.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
from __future__ import annotations
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
import openvino.runtime.opset14 as ov_opset
from openvino import Type
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import get_ov_output
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
raise NotImplementedError(
"`segment_sum` is not supported with openvino backend"
)
def segment_max(data, segment_ids, num_segments=None, sorted=False):
raise NotImplementedError(
"`segment_max` is not supported with openvino backend"
)
def top_k(x, k, sorted=True):
raise NotImplementedError("`top_k` is not supported with openvino backend")
def in_top_k(targets, predictions, k):
raise NotImplementedError(
"`in_top_k` is not supported with openvino backend"
)
def logsumexp(x, axis=None, keepdims=False):
x = get_ov_output(x)
if axis is None:
flatten_shape = ov_opset.constant([-1], Type.i32).output(0)
x = ov_opset.reshape(x, flatten_shape, False).output(0)
axis = 0
if isinstance(axis, tuple):
axis = list(axis)
axis = ov_opset.constant(axis, Type.i32).output(0)
const_zero = ov_opset.constant(0, x.get_element_type()).output(0)
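    # Numerically stable log-sum-exp: subtract the per-axis max (replaced by 0 where it is
    # not finite), exponentiate the shifted values, sum, take the log, then add the max back.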
reduce_max = ov_opset.reduce_max(x, axis, keepdims).output(0)
is_finite = ov_opset.is_finite(reduce_max).output(0)
norm_max = ov_opset.select(is_finite, reduce_max, const_zero).output(0)
norm_max_sub = ov_opset.subtract(x, norm_max).output(0)
exp_norm_max = ov_opset.exp(norm_max_sub).output(0)
sum_exp = ov_opset.reduce_sum(exp_norm_max, axis, keepdims).output(0)
log_sum_exp = ov_opset.log(sum_exp).output(0)
log_sum_exp = ov_opset.add(norm_max, log_sum_exp).output(0)
return OpenVINOKerasTensor(log_sum_exp)
def qr(x, mode="reduced"):
raise NotImplementedError("`qr` is not supported with openvino backend")
def extract_sequences(x, sequence_length, sequence_stride):
raise NotImplementedError(
"`extract_sequences` is not supported with openvino backend"
)
def fft(x):
raise NotImplementedError("`fft` is not supported with openvino backend")
def fft2(x):
raise NotImplementedError("`fft2` is not supported with openvino backend")
def rfft(x, fft_length=None):
raise NotImplementedError("`rfft` is not supported with openvino backend")
def irfft(x, fft_length=None):
raise NotImplementedError("`irfft` is not supported with openvino backend")
def stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
raise NotImplementedError("`stft` is not supported with openvino backend")
def istft(
x,
sequence_length,
sequence_stride,
fft_length,
length=None,
window="hann",
center=True,
):
raise NotImplementedError("`istft` is not supported with openvino backend")
def rsqrt(x):
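    # rsqrt(x) = 1 / sqrt(x): build a constant one in the input dtype and divide it by the
    # elementwise square root.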
x = get_ov_output(x)
const_one = ov_opset.constant(1, x.get_element_type()).output(0)
sqrt = ov_opset.sqrt(x).output(0)
return OpenVINOKerasTensor(ov_opset.divide(const_one, sqrt).output(0))
def erf(x):
x = get_ov_output(x)
erf = ov_opset.erf(x).output(0)
return OpenVINOKerasTensor(erf)
def erfinv(x):
raise NotImplementedError("`erfinv` is not supported with openvino backend")
def solve(a, b):
raise NotImplementedError("`solve` is not supported with openvino backend")
def norm(x, ord=None, axis=None, keepdims=False):
raise NotImplementedError("`norm` is not supported with openvino backend")
|
import openvino.runtime.opset14 as ov_opset
from openvino import Type
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import get_ov_output
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
raise NotImplementedError(
"`segment_sum` is not supported with openvino backend"
)
def segment_max(data, segment_ids, num_segments=None, sorted=False):
raise NotImplementedError(
"`segment_max` is not supported with openvino backend"
)
def top_k(x, k, sorted=False):
raise NotImplementedError("`top_k` is not supported with openvino backend")
def in_top_k(targets, predictions, k):
raise NotImplementedError(
"`in_top_k` is not supported with openvino backend"
)
def logsumexp(x, axis=None, keepdims=False):
x = get_ov_output(x)
if axis is None:
flatten_shape = ov_opset.constant([-1], Type.i32).output(0)
x = ov_opset.reshape(x, flatten_shape, False).output(0)
axis = 0
if isinstance(axis, tuple):
axis = list(axis)
axis = ov_opset.constant(axis, Type.i32).output(0)
const_zero = ov_opset.constant(0, x.get_element_type()).output(0)
reduce_max = ov_opset.reduce_max(x, axis, keepdims).output(0)
is_finite = ov_opset.is_finite(reduce_max).output(0)
norm_max = ov_opset.select(is_finite, reduce_max, const_zero).output(0)
norm_max_sub = ov_opset.subtract(x, norm_max).output(0)
exp_norm_max = ov_opset.exp(norm_max_sub).output(0)
sum_exp = ov_opset.reduce_sum(exp_norm_max, axis, keepdims).output(0)
log_sum_exp = ov_opset.log(sum_exp).output(0)
log_sum_exp = ov_opset.add(norm_max, log_sum_exp).output(0)
return OpenVINOKerasTensor(log_sum_exp)
def qr(x, mode="reduced"):
raise NotImplementedError("`qr` is not supported with openvino backend")
def extract_sequences(x, sequence_length, sequence_stride):
raise NotImplementedError(
"`extract_sequences` is not supported with openvino backend"
)
def fft(x):
raise NotImplementedError("`fft` is not supported with openvino backend")
def fft2(x):
raise NotImplementedError("`fft2` is not supported with openvino backend")
def rfft(x, fft_length=None):
raise NotImplementedError("`rfft` is not supported with openvino backend")
def irfft(x, fft_length=None):
raise NotImplementedError("`irfft` is not supported with openvino backend")
def stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
raise NotImplementedError("`stft` is not supported with openvino backend")
def istft(
x,
sequence_length,
sequence_stride,
fft_length,
length=None,
window="hann",
center=True,
):
raise NotImplementedError("`istft` is not supported with openvino backend")
def rsqrt(x):
x = get_ov_output(x)
const_one = ov_opset.constant(1, x.get_element_type()).output(0)
sqrt = ov_opset.sqrt(x).output(0)
return OpenVINOKerasTensor(ov_opset.divide(const_one, sqrt).output(0))
def erf(x):
x = get_ov_output(x)
erf = ov_opset.erf(x).output(0)
return OpenVINOKerasTensor(erf)
def erfinv(x):
raise NotImplementedError("`erfinv` is not supported with openvino backend")
def solve(a, b):
raise NotImplementedError("`solve` is not supported with openvino backend")
def norm(x, ord=None, axis=None, keepdims=False):
raise NotImplementedError("`norm` is not supported with openvino backend")
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='ATSS',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='ATSS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""Valyu tool spec."""
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ValyuToolSpec(BaseToolSpec):
"""Valyu tool spec."""
spec_functions = [
"context",
]
def __init__(
self,
api_key: str,
verbose: bool = False,
max_price: Optional[float] = 100,
) -> None:
"""Initialize with parameters."""
from valyu import Valyu
self.client = Valyu(api_key=api_key)
self._verbose = verbose
self._max_price = max_price
def context(
self,
query: str,
search_type: str = "both",
data_sources: Optional[List[str]] = None,
max_num_results: int = 10,
max_price: Optional[float] = None,
query_rewrite: bool = True,
similarity_threshold: float = 0.5,
) -> List[Document]:
"""
        Find relevant programmatically licensed proprietary content and web sources to answer your query.
Args:
query (str): The question or topic to search for
search_type (str): Type of sources to search - "proprietary", "web", or "both"
data_sources (Optional[List[str]]): Specific indexes to query from
max_num_results (int): Maximum number of results to return. Defaults to 10
max_price (Optional[float]): Maximum price per content in PCM. Defaults to 100
query_rewrite (bool): Whether to rewrite the query to improve results. Defaults to True
similarity_threshold (float): Minimum similarity score for results. Defaults to 0.5
Returns:
List[Document]: List of Document objects containing the search results
"""
if max_price is None:
max_price = self._max_price
response = self.client.context(
query=query,
search_type=search_type,
data_sources=data_sources,
max_num_results=max_num_results,
max_price=max_price,
query_rewrite=query_rewrite,
similarity_threshold=similarity_threshold,
)
if self._verbose:
print(f"[Valyu Tool] Response: {response}")
return [
Document(
text=result.content,
metadata={
"title": result.title,
"url": result.url,
"source": result.source,
"price": result.price,
},
)
for result in response.results
]
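# A hypothetical usage sketch (assumes a valid Valyu API key; values are illustrative only):
#
#     tool = ValyuToolSpec(api_key="...", max_price=50)
#     docs = tool.context("What is retrieval-augmented generation?", max_num_results=5)
#     for doc in docs:
#         print(doc.metadata["title"], doc.metadata["url"])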
|
"""Valyu tool spec."""
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ValyuToolSpec(BaseToolSpec):
"""Valyu tool spec."""
spec_functions = [
"context",
]
def __init__(
self,
api_key: str,
verbose: bool = False,
max_price: Optional[float] = 100,
) -> None:
"""Initialize with parameters."""
from valyu import Valyu
self.client = Valyu(api_key=api_key)
self._verbose = verbose
self._max_price = max_price
def context(
self,
query: str,
search_type: str = "both",
data_sources: Optional[List[str]] = None,
max_num_results: int = 10,
max_price: Optional[float] = None,
query_rewrite: bool = True,
similarity_threshold: float = 0.5,
) -> List[Document]:
"""Find relevant programmaticly licensed proprietary content and the web to answer your query.
Args:
query (str): The question or topic to search for
search_type (str): Type of sources to search - "proprietary", "web", or "both"
data_sources (Optional[List[str]]): Specific indexes to query from
max_num_results (int): Maximum number of results to return. Defaults to 10
max_price (Optional[float]): Maximum price per content in PCM. Defaults to 100
query_rewrite (bool): Whether to rewrite the query to improve results. Defaults to True
similarity_threshold (float): Minimum similarity score for results. Defaults to 0.5
Returns:
List[Document]: List of Document objects containing the search results
"""
if max_price is None:
max_price = self._max_price
response = self.client.context(
query=query,
search_type=search_type,
data_sources=data_sources,
max_num_results=max_num_results,
max_price=max_price,
query_rewrite=query_rewrite,
similarity_threshold=similarity_threshold,
)
if self._verbose:
print(f"[Valyu Tool] Response: {response}")
return [
Document(
text=result.content,
metadata={
"title": result.title,
"url": result.url,
"source": result.source,
"price": result.price,
},
)
for result in response.results
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v2 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2
from keras.src.applications.mobilenet_v2 import decode_predictions
from keras.src.applications.mobilenet_v2 import preprocess_input
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from contextlib import contextmanager
from typing import Optional
import torch
from mmengine import print_log
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(device_type: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
enabled: bool = True,
cache_enabled: Optional[bool] = None):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.5.0 provide ``torch.cuda.amp.autocast`` for running in
mixed precision , and update it to ``torch.autocast`` in 1.10.0.
Both interfaces have different arguments, and ``torch.autocast``
support running with cpu additionally.
This function provides a unified interface by wrapping
``torch.autocast`` and ``torch.cuda.amp.autocast``, which resolves the
compatibility issues that ``torch.cuda.amp.autocast`` does not support
running mixed precision with cpu, and both contexts have different
arguments. We suggest users using this function in the code
to achieve maximized compatibility of different PyTorch versions.
Note:
``autocast`` requires pytorch version >= 1.5.0. If pytorch version
<= 1.10.0 and cuda is not available, it will raise an error with
        ``enabled=True``, since ``torch.cuda.amp.autocast`` only supports cuda
        mode.
Examples:
>>> # case1: 1.10 > Pytorch version >= 1.5.0
>>> with autocast():
>>> # run in mixed precision context
>>> pass
        >>> with autocast(device_type='cpu'):
>>> # raise error, torch.cuda.amp.autocast only support cuda mode.
>>> pass
>>> # case2: Pytorch version >= 1.10.0
>>> with autocast():
>>> # default cuda mixed precision context
>>> pass
>>> with autocast(device_type='cpu'):
>>> # cpu mixed precision context
>>> pass
>>> with autocast(
>>> device_type='cuda', enabled=True, cache_enabled=True):
>>> # enable precision context with more specific arguments.
>>> pass
Args:
        device_type (str, optional): Whether to use the 'cuda' or 'cpu' device.
            If None, it is inferred from the availability of CUDA.
        enabled (bool): Whether autocasting should be enabled in the region.
            Defaults to True.
        dtype (torch_dtype, optional): Whether to use ``torch.float16`` or
            ``torch.bfloat16``.
        cache_enabled (bool, optional): Whether the weight cache inside
            autocast should be enabled.
"""
    # If `enabled` is False, use an empty (disabled) context so that all
    # calculations are performed in fp32.
assert digit_version(TORCH_VERSION) >= digit_version('1.5.0'), (
'The minimum pytorch version requirements of mmengine is 1.5.0, but '
f'got {TORCH_VERSION}')
if (digit_version('1.5.0') <= digit_version(TORCH_VERSION) <
digit_version('1.10.0')):
# If pytorch version is between 1.5.0 and 1.10.0, the default value of
# dtype for `torch.cuda.amp.autocast` is torch.float16.
assert device_type == 'cuda' or device_type is None, (
'Pytorch version under 1.5.0 only supports running automatic '
'mixed training with cuda')
if dtype is not None or cache_enabled is not None:
print_log(
f'{dtype} and {device_type} will not work for '
'`autocast` since your Pytorch version: '
f'{TORCH_VERSION} <= 1.10.0',
logger='current',
level=logging.WARNING)
if torch.cuda.is_available():
with torch.cuda.amp.autocast(enabled=enabled):
yield
else:
if not enabled:
yield
else:
raise RuntimeError(
'If pytorch versions is between 1.5.0 and 1.10, '
'`autocast` is only available in gpu mode')
else:
if torch.cuda.is_available():
device_type = 'cuda' if device_type is None else device_type
else:
device_type = 'cpu' if device_type is None else device_type
if digit_version(TORCH_VERSION) < digit_version('1.11.0'):
if dtype is not None and dtype != torch.bfloat16:
print_log(
f'{dtype} must be `torch.bfloat16` with Pytorch '
f'version: {TORCH_VERSION}',
logger='current',
level=logging.WARNING)
dtype = torch.bfloat16
with torch.autocast(
device_type=device_type,
enabled=enabled,
dtype=dtype,
cache_enabled=cache_enabled):
yield
|
# Copyright (c) OpenMMLab. All rights reserved.
from contextlib import contextmanager
import torch
from mmengine.utils import TORCH_VERSION, digit_version
@contextmanager
def autocast(enabled: bool = True, **kwargs):
"""A wrapper of ``torch.autocast`` and ``toch.cuda.amp.autocast``.
Pytorch 1.6.0 provide ``torch.cuda.amp.autocast`` for running in
mixed precision , and update it to ``torch.autocast`` in 1.10.0.
Both interfaces have different arguments, and ``torch.autocast``
support running with cpu additionally.
This function provides a unified interface by wrapping
``torch.autocast`` and ``torch.cuda.amp.autocast``, which resolves the
compatibility issues that ``torch.cuda.amp.autocast`` does not support
running mixed precision with cpu, and both contexts have different
arguments. We suggest users using this function in the code
to achieve maximized compatibility of different PyTorch versions.
Note:
``autocast`` requires pytorch version >= 1.5.0. If pytorch version
<= 1.10.0 and cuda is not available, it will raise an error with
        ``enabled=True``, since ``torch.cuda.amp.autocast`` only supports cuda
        mode.
Examples:
>>> # case1: 1.10 > Pytorch version >= 1.5.0
>>> with autocast():
>>> # run in mixed precision context
>>> pass
        >>> with autocast(device_type='cpu'):
>>> # raise error, torch.cuda.amp.autocast only support cuda mode.
>>> pass
>>> # case2: Pytorch version >= 1.10.0
>>> with autocast():
>>> # default cuda mixed precision context
>>> pass
>>> with autocast(device_type='cpu'):
>>> # cpu mixed precision context
>>> pass
>>> with autocast(
>>> device_type='cuda', enabled=True, cache_enabled=True):
>>> # enable precision context with more specific arguments.
>>> pass
Args:
enabled (bool): Whether autocasting should be enabled in the region.
Defaults to True.
kwargs (dict): Arguments of torch.autocast except for ``enabled``.
"""
    # If `enabled` is False, use an empty (disabled) context so that all
    # calculations are performed in fp32.
assert digit_version(TORCH_VERSION) >= digit_version('1.5.0'), (
'The minimum pytorch version requirements of mmengine is 1.5.0, but '
f'got {TORCH_VERSION}')
if (digit_version('1.5.0') <= digit_version(TORCH_VERSION) <
digit_version('1.10.0')):
# If pytorch version is between 1.5.0 and 1.10.0, the default value of
# dtype for `torch.cuda.amp.autocast` is torch.float16.
assert not kwargs, (
f'autocast under pytorch {TORCH_VERSION} only accept `enabled` '
'arguments.')
if torch.cuda.is_available():
with torch.cuda.amp.autocast(enabled=enabled):
yield
else:
if not enabled:
yield
else:
raise RuntimeError(
'If pytorch versions is between 1.5.0 and 1.10, '
'`autocast` is only available in gpu mode')
elif (digit_version('1.11.0') > digit_version(TORCH_VERSION) >=
digit_version('1.10.0')):
if torch.cuda.is_available():
kwargs.setdefault('device_type', 'cuda')
else:
kwargs.setdefault('device_type', 'cpu')
# torch.autocast only support `dtype=torch.bfloat16` in
# pytorch 1.10
kwargs.setdefault('dtype', torch.bfloat16)
with torch.autocast(enabled=enabled, **kwargs):
yield
elif digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
if torch.cuda.is_available():
kwargs.setdefault('device_type', 'cuda')
else:
kwargs.setdefault('device_type', 'cpu')
with torch.autocast(enabled=enabled, **kwargs):
yield
|
from typing import TYPE_CHECKING, Type, Optional
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
from docarray.proto.docarray_pb2 import DocumentProto
class ProtobufMixin:
@classmethod
def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T':
from docarray.proto.io import parse_proto
return parse_proto(pb_msg)
def to_protobuf(self, ndarray_type: Optional[str] = None) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
        :param ndarray_type: can be ``list`` or ``numpy``; if set, all ndarray-like objects are forced to be ``list`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from docarray.proto.io import flush_proto
return flush_proto(self, ndarray_type)
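# A hedged round-trip sketch, assuming a class that uses this mixin (docarray's `Document`
# is used here purely for illustration):
#
#     from docarray import Document
#     pb_msg = Document(text='hello').to_protobuf()
#     restored = Document.from_protobuf(pb_msg)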
|
from typing import TYPE_CHECKING, Type, Optional
if TYPE_CHECKING:
from docarray.typing import T
from docarray.proto.docarray_pb2 import DocumentProto
class ProtobufMixin:
@classmethod
def from_protobuf(cls: Type['T'], pb_msg: 'DocumentProto') -> 'T':
from docarray.proto.io import parse_proto
return parse_proto(pb_msg)
def to_protobuf(self, ndarray_type: Optional[str] = None) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
        :param ndarray_type: can be ``list`` or ``numpy``; if set, all ndarray-like objects are forced to be ``list`` or ``numpy.ndarray``.
:return: the protobuf message
"""
from docarray.proto.io import flush_proto
return flush_proto(self, ndarray_type)
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like.
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
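# translations[src_lang][trg_lang] maps each source sentence to the list of its translations
# in the target language; it is filled in while reading links.csv below.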
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like.
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it doesn't matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path
from typing import Optional
import mmengine
from mmdet.registry import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class V3DetDataset(CocoDataset):
"""Dataset for V3Det."""
METAINFO = {
'classes': None,
'palette': None,
}
def __init__(
self,
*args,
metainfo: Optional[dict] = None,
data_root: str = '',
label_file='annotations/category_name_13204_v3det_2023_v1.txt', # noqa
**kwargs) -> None:
class_names = tuple(
mmengine.list_from_file(os.path.join(data_root, label_file)))
if metainfo is None:
metainfo = {'classes': class_names}
super().__init__(
*args, data_root=data_root, metainfo=metainfo, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmengine
from mmdet.registry import DATASETS
from .coco import CocoDataset
V3DET_CLASSES = tuple(
mmengine.list_from_file(
'configs/v3det/category_name_13204_v3det_2023_v1.txt'))
@DATASETS.register_module()
class V3DetDataset(CocoDataset):
"""Dataset for V3Det."""
METAINFO = {
'classes': V3DET_CLASSES,
'palette': None, # TODO: add palette
}
|
"""
This is a simple application for sentence embeddings: clustering.
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Perform k-means clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
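# labels_ holds, for each corpus sentence (in input order), the index of the cluster it was assigned to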
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering.
Sentences are mapped to sentence embeddings and then k-means clustering is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Perform k-means clustering
num_clusters = 5
clustering_model = KMeans(n_clusters=num_clusters)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = [[] for i in range(num_clusters)]
for sentence_id, cluster_id in enumerate(cluster_assignment):
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in enumerate(clustered_sentences):
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
Use scikit-learn regressor interface with CPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
def main(client):
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method="hist")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Use scikit-learn regressor interface with CPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
def main(client):
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method="hist")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
@pytest.fixture(autouse=True)
def set_logger_level():
logger = logging.getLogger('docarray')
logger.setLevel(logging.INFO)
|
import pytest
import logging
@pytest.fixture(autouse=True)
def set_logger_level():
logger = logging.getLogger('docarray')
logger.setLevel(logging.INFO)
|
__version__ = '0.13.25'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.24'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import CutMix, MixUp, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomChannelPermutation,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat
from ._misc import (
ConvertImageDtype,
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
SanitizeBoundingBoxes,
ToDtype,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImage, ToPILImage, ToPureTensor
from ._deprecated import ToTensor # usort: skip
|
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
]
|
from .Transformer import Transformer
from .Asym import Asym
from .BoW import BoW
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Normalize import Normalize
from .Pooling import Pooling
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
from .CLIPModel import CLIPModel
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize as deserialize
from keras.src.dtype_policies import get as get
from keras.src.dtype_policies import serialize as serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedDTypePolicy as QuantizedDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy,
)
from keras.src.dtype_policies.dtype_policy_map import (
DTypePolicyMap as DTypePolicyMap,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.session.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
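    # Two runs are in flight on the same workflow, so streaming events from the
    # workflow object itself is expected to raise the error matched below.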
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await asyncio.gather(stream_1, stream_2)
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
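    # Reuse the context of the finished run so the stored cur_count carries over.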
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert await handler_2.ctx.get("cur_count") == 2
|
import asyncio
import pytest
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import Event, StartEvent, StopEvent
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.errors import WorkflowRuntimeError, WorkflowTimeoutError
from .conftest import OneTestEvent
class StreamingWorkflow(Workflow):
@step
async def chat(self, ctx: Context, ev: StartEvent) -> StopEvent:
async def stream_messages():
resp = "Paul Graham is a British-American computer scientist, entrepreneur, vc, and writer."
for word in resp.split():
yield word
async for w in stream_messages():
ctx.session.write_event_to_stream(Event(msg=w))
return StopEvent(result=None)
@pytest.mark.asyncio()
async def test_e2e():
wf = StreamingWorkflow()
r = wf.run()
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
await r
@pytest.mark.asyncio()
async def test_too_many_runs():
wf = StreamingWorkflow()
r = asyncio.gather(wf.run(), wf.run())
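    # Two runs are in flight on the same workflow, so streaming events from the
    # workflow object itself is expected to raise the error matched below.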
with pytest.raises(
WorkflowRuntimeError,
match="This workflow has multiple concurrent runs in progress and cannot stream events",
):
async for ev in wf.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_task_raised():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
raise ValueError("The step raised an error!")
wf = DummyWorkflow()
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(
WorkflowRuntimeError, match="Error in step 'step': The step raised an error!"
):
await r
@pytest.mark.asyncio()
async def test_task_timeout():
class DummyWorkflow(Workflow):
@step
async def step(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(OneTestEvent(test_param="foo"))
await asyncio.sleep(2)
return StopEvent()
wf = DummyWorkflow(timeout=1)
r = wf.run()
# Make sure we don't block indefinitely here because the step raised
async for ev in r.stream_events():
if not isinstance(ev, StopEvent):
assert ev.test_param == "foo"
# Make sure the await actually caught the exception
with pytest.raises(WorkflowTimeoutError, match="Operation timed out"):
await r
@pytest.mark.asyncio()
async def test_multiple_sequential_streams():
wf = StreamingWorkflow()
r = wf.run()
# stream 1
async for _ in r.stream_events():
pass
await r
# stream 2 -- should not raise an error
r = wf.run()
async for _ in r.stream_events():
pass
await r
@pytest.mark.asyncio()
async def test_multiple_ongoing_streams():
wf = StreamingWorkflow()
stream_1 = wf.run()
stream_2 = wf.run()
async for ev in stream_1.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
async for ev in stream_2.stream_events():
if not isinstance(ev, StopEvent):
assert "msg" in ev
@pytest.mark.asyncio()
async def test_resume_streams():
class CounterWorkflow(Workflow):
@step
async def count(self, ctx: Context, ev: StartEvent) -> StopEvent:
ctx.write_event_to_stream(Event(msg="hello!"))
cur_count = await ctx.get("cur_count", default=0)
await ctx.set("cur_count", cur_count + 1)
return StopEvent(result="done")
wf = CounterWorkflow()
handler_1 = wf.run()
async for _ in handler_1.stream_events():
pass
await handler_1
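    # Reuse the context of the finished run so the stored cur_count carries over.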
handler_2 = wf.run(ctx=handler_1.ctx)
async for _ in handler_2.stream_events():
pass
await handler_2
assert await handler_2.ctx.get("cur_count") == 2
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet101_caffe')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method'
]
|
"""Utilities for chat loaders."""
from copy import deepcopy
from typing import Iterable, Iterator, List
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, BaseMessage
def merge_chat_runs_in_session(
chat_session: ChatSession, delimiter: str = "\n\n"
) -> ChatSession:
"""Merge chat runs together in a chat session.
A chat run is a sequence of messages from the same sender.
Args:
chat_session: A chat session.
Returns:
A chat session with merged chat runs.
"""
messages: List[BaseMessage] = []
for message in chat_session["messages"]:
if not isinstance(message.content, str):
raise ValueError(
"Chat Loaders only support messages with content type string, "
f"got {message.content}"
)
if not messages:
messages.append(deepcopy(message))
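        # The message has the same type and the same non-None sender as the
        # previous one: merge its content into that message instead of appending.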
elif (
isinstance(message, type(messages[-1]))
and messages[-1].additional_kwargs.get("sender") is not None
and messages[-1].additional_kwargs["sender"]
== message.additional_kwargs.get("sender")
):
if not isinstance(messages[-1].content, str):
raise ValueError(
"Chat Loaders only support messages with content type string, "
f"got {messages[-1].content}"
)
messages[-1].content = (
messages[-1].content + delimiter + message.content
).strip()
messages[-1].additional_kwargs.get("events", []).extend(
message.additional_kwargs.get("events") or []
)
else:
messages.append(deepcopy(message))
return ChatSession(messages=messages)
def merge_chat_runs(chat_sessions: Iterable[ChatSession]) -> Iterator[ChatSession]:
"""Merge chat runs together.
A chat run is a sequence of messages from the same sender.
Args:
chat_sessions: A list of chat sessions.
Returns:
A list of chat sessions with merged chat runs.
"""
for chat_session in chat_sessions:
yield merge_chat_runs_in_session(chat_session)
def map_ai_messages_in_session(chat_sessions: ChatSession, sender: str) -> ChatSession:
"""Convert messages from the specified 'sender' to AI messages.
This is useful for fine-tuning the AI to adapt to your voice.
"""
messages = []
num_converted = 0
for message in chat_sessions["messages"]:
if message.additional_kwargs.get("sender") == sender:
message = AIMessage(
content=message.content,
additional_kwargs=message.additional_kwargs.copy(),
example=getattr(message, "example", None),
)
num_converted += 1
messages.append(message)
return ChatSession(messages=messages)
def map_ai_messages(
chat_sessions: Iterable[ChatSession], sender: str
) -> Iterator[ChatSession]:
"""Convert messages from the specified 'sender' to AI messages.
This is useful for fine-tuning the AI to adapt to your voice.
"""
for chat_session in chat_sessions:
yield map_ai_messages_in_session(chat_session, sender)
|
"""Utilities for chat loaders."""
from copy import deepcopy
from typing import Iterable, Iterator, List
from langchain_core.chat_sessions import ChatSession
from langchain_core.messages import AIMessage, BaseMessage
def merge_chat_runs_in_session(
chat_session: ChatSession, delimiter: str = "\n\n"
) -> ChatSession:
"""Merge chat runs together in a chat session.
A chat run is a sequence of messages from the same sender.
Args:
chat_session: A chat session.
Returns:
A chat session with merged chat runs.
"""
messages: List[BaseMessage] = []
for message in chat_session["messages"]:
if not isinstance(message.content, str):
raise ValueError(
"Chat Loaders only support messages with content type string, "
f"got {message.content}"
)
if not messages:
messages.append(deepcopy(message))
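        # The message has the same type and the same non-None sender as the
        # previous one: merge its content into that message instead of appending.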
elif (
isinstance(message, type(messages[-1]))
and messages[-1].additional_kwargs.get("sender") is not None
and messages[-1].additional_kwargs["sender"]
== message.additional_kwargs.get("sender")
):
if not isinstance(messages[-1].content, str):
raise ValueError(
"Chat Loaders only support messages with content type string, "
f"got {messages[-1].content}"
)
messages[-1].content = (
messages[-1].content + delimiter + message.content
).strip()
messages[-1].additional_kwargs.get("events", []).extend(
message.additional_kwargs.get("events") or []
)
else:
messages.append(deepcopy(message))
return ChatSession(messages=messages)
def merge_chat_runs(chat_sessions: Iterable[ChatSession]) -> Iterator[ChatSession]:
"""Merge chat runs together.
A chat run is a sequence of messages from the same sender.
Args:
chat_sessions: A list of chat sessions.
Returns:
A list of chat sessions with merged chat runs.
"""
for chat_session in chat_sessions:
yield merge_chat_runs_in_session(chat_session)
def map_ai_messages_in_session(chat_sessions: ChatSession, sender: str) -> ChatSession:
"""Convert messages from the specified 'sender' to AI messages.
This is useful for fine-tuning the AI to adapt to your voice.
"""
messages = []
num_converted = 0
for message in chat_sessions["messages"]:
if message.additional_kwargs.get("sender") == sender:
message = AIMessage(
content=message.content,
additional_kwargs=message.additional_kwargs.copy(),
example=getattr(message, "example", None), # type: ignore[arg-type]
)
num_converted += 1
messages.append(message)
return ChatSession(messages=messages)
def map_ai_messages(
chat_sessions: Iterable[ChatSession], sender: str
) -> Iterator[ChatSession]:
"""Convert messages from the specified 'sender' to AI messages.
This is useful for fine-tuning the AI to adapt to your voice.
"""
for chat_session in chat_sessions:
yield map_ai_messages_in_session(chat_session, sender)
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Tool for the Reddit search API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
class RedditSearchSchema(BaseModel):
"""Input for Reddit search."""
query: str = Field(
description="should be query string that post title should \
contain, or '*' if anything is allowed."
)
sort: str = Field(
description='should be sort method, which is one of: "relevance" \
, "hot", "top", "new", or "comments".'
)
time_filter: str = Field(
description='should be time period to filter by, which is \
one of "all", "day", "hour", "month", "week", or "year"'
)
subreddit: str = Field(
description='should be name of subreddit, like "all" for \
r/all'
)
limit: str = Field(
description="a positive integer indicating the maximum number \
of results to return"
)
class RedditSearchRun(BaseTool):
"""Tool that queries for posts on a subreddit."""
name: str = "reddit_search"
description: str = (
"A tool that searches for posts on Reddit."
"Useful when you need to know post information on a subreddit."
)
api_wrapper: RedditSearchAPIWrapper = Field(default_factory=RedditSearchAPIWrapper) # type: ignore[arg-type]
args_schema: Type[BaseModel] = RedditSearchSchema
def _run(
self,
query: str,
sort: str,
time_filter: str,
subreddit: str,
limit: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(
query=query,
sort=sort,
time_filter=time_filter,
subreddit=subreddit,
limit=int(limit),
)
|
"""Tool for the Reddit search API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
class RedditSearchSchema(BaseModel):
"""Input for Reddit search."""
query: str = Field(
description="should be query string that post title should \
contain, or '*' if anything is allowed."
)
sort: str = Field(
description='should be sort method, which is one of: "relevance" \
, "hot", "top", "new", or "comments".'
)
time_filter: str = Field(
description='should be time period to filter by, which is \
one of "all", "day", "hour", "month", "week", or "year"'
)
subreddit: str = Field(
description='should be name of subreddit, like "all" for \
r/all'
)
limit: str = Field(
description="a positive integer indicating the maximum number \
of results to return"
)
class RedditSearchRun(BaseTool): # type: ignore[override, override]
"""Tool that queries for posts on a subreddit."""
name: str = "reddit_search"
description: str = (
"A tool that searches for posts on Reddit."
"Useful when you need to know post information on a subreddit."
)
api_wrapper: RedditSearchAPIWrapper = Field(default_factory=RedditSearchAPIWrapper) # type: ignore[arg-type]
args_schema: Type[BaseModel] = RedditSearchSchema
def _run(
self,
query: str,
sort: str,
time_filter: str,
subreddit: str,
limit: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(
query=query,
sort=sort,
time_filter=time_filter,
subreddit=subreddit,
limit=int(limit),
)
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import (
CONVTASNET_BASE_LIBRI2MIX,
HDEMUCS_HIGH_MUSDB,
HDEMUCS_HIGH_MUSDB_PLUS,
SourceSeparationBundle,
)
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
"HDEMUCS_HIGH_MUSDB_PLUS",
"HDEMUCS_HIGH_MUSDB",
]
|
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')
self.results = {
'img_path':
img_path,
'img_id':
12345,
'img_shape': (300, 400),
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'ignore_flag': 1
}]
}
self.weak_pipeline = [
dict(type='ShearX'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.strong_pipeline = [
dict(type='ShearX'),
dict(type='ShearY'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.labeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
sup_teacher=self.weak_pipeline,
sup_student=self.strong_pipeline),
]
self.unlabeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
unsup_teacher=self.weak_pipeline,
unsup_student=self.strong_pipeline),
]
def test_transform(self):
labeled_pipeline = Compose(self.labeled_pipeline)
labeled_results = labeled_pipeline(copy.deepcopy(self.results))
unlabeled_pipeline = Compose(self.unlabeled_pipeline)
unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))
# test branch sup_teacher and sup_student
sup_branches = ['sup_teacher', 'sup_student']
for branch in sup_branches:
self.assertIn(branch, labeled_results)
self.assertIn('homography_matrix',
labeled_results[branch]['data_sample'])
self.assertIn('labels',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('bboxes',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('masks',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('gt_sem_seg', labeled_results[branch]['data_sample'])
# test branch unsup_teacher and unsup_student
unsup_branches = ['unsup_teacher', 'unsup_student']
for branch in unsup_branches:
self.assertIn(branch, unlabeled_results)
self.assertIn('homography_matrix',
unlabeled_results[branch]['data_sample'])
self.assertNotIn(
'labels',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'bboxes',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'masks', unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn('gt_sem_seg',
unlabeled_results[branch]['data_sample'])
def test_repr(self):
pipeline = [dict(type='PackDetInputs', meta_keys=())]
transform = MultiBranch(sup=pipeline, unsup=pipeline)
self.assertEqual(
repr(transform),
("MultiBranch(branch_pipelines=['sup', 'unsup'])"))
|
import copy
import os.path as osp
import unittest
from mmcv.transforms import Compose
from mmdet.datasets.transforms import MultiBranch
from mmdet.utils import register_all_modules
register_all_modules()
class TestMultiBranch(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
seg_map = osp.join(data_prefix, 'gray.jpg')
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction',
'homography_matrix')
self.results = {
'img_path':
img_path,
'img_id':
12345,
'img_shape': (300, 400),
'seg_map_path':
seg_map,
'instances': [{
'bbox': [0, 0, 10, 20],
'bbox_label': 1,
'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
'ignore_flag': 0
}, {
'bbox': [10, 10, 110, 120],
'bbox_label': 2,
'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
'ignore_flag': 0
}, {
'bbox': [50, 50, 60, 80],
'bbox_label': 2,
'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
'ignore_flag': 1
}]
}
self.weak_pipeline = [
dict(type='ShearX'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.strong_pipeline = [
dict(type='ShearX'),
dict(type='ShearY'),
dict(type='PackDetInputs', meta_keys=self.meta_keys)
]
self.labeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
sup_teacher=self.weak_pipeline,
sup_student=self.strong_pipeline),
]
self.unlabeled_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='MultiBranch',
unsup_teacher=self.weak_pipeline,
unsup_student=self.strong_pipeline),
]
def test_transform(self):
labeled_pipeline = Compose(self.labeled_pipeline)
labeled_results = labeled_pipeline(copy.deepcopy(self.results))
unlabeled_pipeline = Compose(self.unlabeled_pipeline)
unlabeled_results = unlabeled_pipeline(copy.deepcopy(self.results))
# test branch sup_teacher and sup_student
sup_branches = ['sup_teacher', 'sup_student']
for branch in sup_branches:
self.assertIn(branch, labeled_results)
self.assertIn('homography_matrix',
labeled_results[branch]['data_sample'])
self.assertIn('labels',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('bboxes',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('masks',
labeled_results[branch]['data_sample'].gt_instances)
self.assertIn('gt_sem_seg', labeled_results[branch]['data_sample'])
# test branch unsup_teacher and unsup_student
unsup_branches = ['unsup_teacher', 'unsup_student']
for branch in unsup_branches:
self.assertIn(branch, unlabeled_results)
self.assertIn('homography_matrix',
unlabeled_results[branch]['data_sample'])
self.assertNotIn(
'labels',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'bboxes',
unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn(
'masks', unlabeled_results[branch]['data_sample'].gt_instances)
self.assertNotIn('gt_sem_seg',
unlabeled_results[branch]['data_sample'])
def test_repr(self):
pipeline = [dict(type='PackDetInputs', meta_keys=())]
transform = MultiBranch(sup=pipeline, unsup=pipeline)
self.assertEqual(
repr(transform),
("MultiBranch(branch_pipelines=['sup', 'unsup'])"))
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s") -> None:
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"):
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=True,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
default_hooks = dict(optimizer=dict(type='OptimizerHook', grad_clip=None))
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True),
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=True,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# dataset settings
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer_config = dict(_delete_=True, grad_clip=None)
lr_config = dict(warmup='linear')
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='AudioTorchTensor')
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioTorchTensor, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
def to_audio_bytes(self):
import torch
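        # Scale the float waveform (assumed to lie in [-1, 1]) to the 16-bit
        # PCM range and return the raw bytes.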
tensor = (self * MAX_INT_16).to(dtype=torch.int16)
return tensor.cpu().detach().numpy().tobytes()
|
from typing import TypeVar
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.audio.audio_ndarray import MAX_INT_16
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='AudioTorchTensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = parse_obj_as(AudioTorchTensor, doc_2.url.load())
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
"""
_PROTO_FIELD_NAME = 'audio_torch_tensor'
def to_audio_bytes(self):
import torch
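        # Scale the float waveform (assumed to lie in [-1, 1]) to the 16-bit
        # PCM range and return the raw bytes.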
tensor = (self * MAX_INT_16).to(dtype=torch.int16)
return tensor.cpu().detach().numpy().tobytes()
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
BarkScale,
BarkSpectrogram,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseBarkScale,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseBarkScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"BarkScale",
"MelSpectrogram",
"BarkSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@MODELS.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
not 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
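# Hedged usage sketch (added for illustration, not part of the original file): it
# assumes the usual mmdet `weighted_loss` behaviour, i.e. the element-wise loss
# above is reduced according to `reduction`/`avg_factor`. Guarded so that
# importing the module is unaffected.
if __name__ == '__main__':
    import torch

    loss_fn = GaussianFocalLoss(alpha=2.0, gamma=4.0, reduction='mean')
    pred = torch.rand(2, 1, 8, 8)       # predicted heatmap scores in [0, 1)
    target = torch.zeros(2, 1, 8, 8)
    target[:, :, 4, 4] = 1.0            # gaussian peak: only the centre pixel equals 1
    loss = loss_fn(pred, target)
    print(loss)                         # scalar tensor after 'mean' reduction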
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
        alpha (float, optional): Power applied to the prediction term,
            controlling how strongly easy examples are down-weighted.
            Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
    Please note that the target in GaussianFocalLoss is a gaussian heatmap,
    not a 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
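# Worked numeric check (added for illustration, not part of the original file):
# for a pixel where the gaussian target is exactly 1, the unreduced loss is
# -log(pred + eps) * (1 - pred) ** alpha, so confident predictions at heatmap
# peaks contribute almost nothing. Guarded so importing the module is unaffected.
if __name__ == '__main__':
    import math

    eps, alpha = 1e-12, 2.0
    pred = 0.9
    pos_term = -math.log(pred + eps) * (1 - pred) ** alpha
    print(f'{pos_term:.3e}')  # ~1.054e-03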
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
NdArrayProto,
NodeProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
NdArrayProto,
NodeProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocumentArrayStackedProto',
'DocumentArrayProto',
]
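# Hypothetical usage sketch (added for illustration, not part of the original
# file): whichever import branch was taken, the re-exported message classes are
# ordinary generated protobuf messages, so callers can serialize and parse them
# without caring about the installed protobuf major version.
if __name__ == '__main__':
    nd = NdArrayProto()
    payload = nd.SerializeToString()     # standard protobuf message API
    restored = NdArrayProto()
    restored.ParseFromString(payload)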
|
from google.protobuf import __version__ as __pb__version__
if __pb__version__.startswith('4'):
from docarray.proto.pb.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
NdArrayProto,
NodeProto,
UnionArrayProto,
)
else:
from docarray.proto.pb2.docarray_pb2 import (
DocumentArrayProto,
DocumentArrayStackedProto,
DocumentProto,
NdArrayProto,
NodeProto,
UnionArrayProto,
)
__all__ = [
'DocumentArrayProto',
'DocumentProto',
'NdArrayProto',
'NodeProto',
'DocumentArrayStackedProto',
'DocumentArrayProto',
'UnionArrayProto',
]
|
import pytest
import torch
import torchaudio
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
@pytest.fixture
def sample_speech(tmp_path, lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = tmp_path.parent / filename
if not path.exists():
torchaudio.utils.download_asset(f"test-assets/{filename}", path=path)
return path
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmpdir, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
torch.hub.set_dir(tmpdir)
yield
torch.hub.set_dir(org_dir)
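# Hedged usage sketch (added for illustration, not part of the original conftest):
# greedy CTC decoding as implemented above -- per-frame argmax, collapse repeats,
# then drop the blank index (0 here, shown as "-").
if __name__ == "__main__":
    labels = ["-", "h", "e", "l", "o"]
    decoder = GreedyCTCDecoder(labels, blank=0)
    logits = torch.zeros(6, 5)
    for t, idx in enumerate([1, 2, 3, 3, 0, 4]):  # argmax path: h e l l - o
        logits[t, idx] = 1.0
    print(decoder(logits))  # "helo"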
|
import pytest
import torch
from torchaudio._internal import download_url_to_file
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank: int = 0):
super().__init__()
self.blank = blank
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = []
for i in best_path:
if i != self.blank:
hypothesis.append(self.labels[i])
return "".join(hypothesis)
@pytest.fixture
def ctc_decoder():
return GreedyCTCDecoder
_FILES = {
"en": "Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.flac",
"de": "20090505-0900-PLENARY-16-de_20090505-21_56_00_8.flac",
"en2": "20120613-0900-PLENARY-8-en_20120613-13_46_50_3.flac",
"es": "20130207-0900-PLENARY-7-es_20130207-13_02_05_5.flac",
"fr": "20121212-0900-PLENARY-5-fr_20121212-11_37_04_10.flac",
"it": "20170516-0900-PLENARY-16-it_20170516-18_56_31_1.flac",
}
@pytest.fixture
def sample_speech(tmp_path, lang):
if lang not in _FILES:
raise NotImplementedError(f"Unexpected lang: {lang}")
filename = _FILES[lang]
path = tmp_path.parent / filename
if not path.exists():
url = f"https://download.pytorch.org/torchaudio/test-assets/{filename}"
print(f"downloading from {url}")
download_url_to_file(url, path, progress=False)
return path
def pytest_addoption(parser):
parser.addoption(
"--use-tmp-hub-dir",
action="store_true",
help=(
"When provided, tests will use temporary directory as Torch Hub directory. "
"Downloaded models will be deleted after each test."
),
)
@pytest.fixture(autouse=True)
def temp_hub_dir(tmpdir, pytestconfig):
if not pytestconfig.getoption("use_tmp_hub_dir"):
yield
else:
org_dir = torch.hub.get_dir()
torch.hub.set_dir(tmpdir)
yield
torch.hub.set_dir(org_dir)
|
import os
import pandas as pd
from huggingface_hub import hf_hub_download, upload_file
from huggingface_hub.utils import EntryNotFoundError
REPO_ID = "diffusers/benchmarks"
def has_previous_benchmark() -> str:
from run_all import FINAL_CSV_FILENAME
csv_path = None
try:
csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILENAME)
except EntryNotFoundError:
csv_path = None
return csv_path
def filter_float(value):
if isinstance(value, str):
return float(value.split()[0])
return value
def push_to_hf_dataset():
from run_all import FINAL_CSV_FILENAME, GITHUB_SHA
csv_path = has_previous_benchmark()
if csv_path is not None:
current_results = pd.read_csv(FINAL_CSV_FILENAME)
previous_results = pd.read_csv(csv_path)
numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
for column in numeric_columns:
# get previous values as floats, aligned to current index
prev_vals = previous_results[column].map(filter_float).reindex(current_results.index)
# get current values as floats
curr_vals = current_results[column].astype(float)
# stringify the current values
curr_str = curr_vals.map(str)
# build an appendage only when prev exists and differs
append_str = prev_vals.where(prev_vals.notnull() & (prev_vals != curr_vals), other=pd.NA).map(
lambda x: f" ({x})" if pd.notnull(x) else ""
)
# combine
current_results[column] = curr_str + append_str
os.remove(FINAL_CSV_FILENAME)
current_results.to_csv(FINAL_CSV_FILENAME, index=False)
commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
upload_file(
repo_id=REPO_ID,
path_in_repo=FINAL_CSV_FILENAME,
path_or_fileobj=FINAL_CSV_FILENAME,
repo_type="dataset",
commit_message=commit_message,
)
upload_file(
repo_id="diffusers/benchmark-analyzer",
path_in_repo=FINAL_CSV_FILENAME,
path_or_fileobj=FINAL_CSV_FILENAME,
repo_type="space",
commit_message=commit_message,
)
if __name__ == "__main__":
push_to_hf_dataset()
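# Worked example of the column update above (illustrative, not part of the
# original script): a changed numeric value is rendered as "new (old)" and an
# unchanged value stays bare, e.g.
#   current  memory = [8.1, 4.0]
#   previous memory = [7.9, 4.0]
#   result   memory = ["8.1 (7.9)", "4.0"]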
|
import glob
import sys
import pandas as pd
from huggingface_hub import hf_hub_download, upload_file
from huggingface_hub.utils import EntryNotFoundError
sys.path.append(".")
from utils import BASE_PATH, FINAL_CSV_FILE, GITHUB_SHA, REPO_ID, collate_csv # noqa: E402
def has_previous_benchmark() -> str:
csv_path = None
try:
csv_path = hf_hub_download(repo_id=REPO_ID, repo_type="dataset", filename=FINAL_CSV_FILE)
except EntryNotFoundError:
csv_path = None
return csv_path
def filter_float(value):
if isinstance(value, str):
return float(value.split()[0])
return value
def push_to_hf_dataset():
all_csvs = sorted(glob.glob(f"{BASE_PATH}/*.csv"))
collate_csv(all_csvs, FINAL_CSV_FILE)
# If there's an existing benchmark file, we should report the changes.
csv_path = has_previous_benchmark()
if csv_path is not None:
current_results = pd.read_csv(FINAL_CSV_FILE)
previous_results = pd.read_csv(csv_path)
numeric_columns = current_results.select_dtypes(include=["float64", "int64"]).columns
numeric_columns = [
c for c in numeric_columns if c not in ["batch_size", "num_inference_steps", "actual_gpu_memory (gbs)"]
]
for column in numeric_columns:
previous_results[column] = previous_results[column].map(lambda x: filter_float(x))
# Calculate the percentage change
current_results[column] = current_results[column].astype(float)
previous_results[column] = previous_results[column].astype(float)
percent_change = ((current_results[column] - previous_results[column]) / previous_results[column]) * 100
# Format the values with '+' or '-' sign and append to original values
current_results[column] = current_results[column].map(str) + percent_change.map(
lambda x: f" ({'+' if x > 0 else ''}{x:.2f}%)"
)
# There might be newly added rows. So, filter out the NaNs.
current_results[column] = current_results[column].map(lambda x: x.replace(" (nan%)", ""))
# Overwrite the current result file.
current_results.to_csv(FINAL_CSV_FILE, index=False)
commit_message = f"upload from sha: {GITHUB_SHA}" if GITHUB_SHA is not None else "upload benchmark results"
upload_file(
repo_id=REPO_ID,
path_in_repo=FINAL_CSV_FILE,
path_or_fileobj=FINAL_CSV_FILE,
repo_type="dataset",
commit_message=commit_message,
)
if __name__ == "__main__":
push_to_hf_dataset()
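# Worked example of the formatting above (illustrative, not part of the original
# script): current = 10.5 with previous = 10.0 is a +5.00% change, so the cell
# becomes "10.5 (+5.00%)"; a newly added row has no previous value, produces
# " (nan%)", and the final replace() strips it back to "10.5".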
|
"""
=====================================
Plot the support vectors in LinearSVC
=====================================
Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide
the support vectors. This example demonstrates how to obtain the support
vectors in LinearSVC.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.svm import LinearSVC
X, y = make_blobs(n_samples=40, centers=2, random_state=0)
plt.figure(figsize=(10, 5))
for i, C in enumerate([1, 100]):
# "hinge" is the standard SVM loss
clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
# obtain the support vectors through the decision function
decision_function = clf.decision_function(X)
# we can also calculate the decision function manually
# decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]
# The support vectors are the samples that lie within the margin
# boundaries, whose size is conventionally constrained to 1
support_vector_indices = (np.abs(decision_function) <= 1 + 1e-15).nonzero()[0]
support_vectors = X[support_vector_indices]
plt.subplot(1, 2, i + 1)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)
ax = plt.gca()
DecisionBoundaryDisplay.from_estimator(
clf,
X,
ax=ax,
grid_resolution=50,
plot_method="contour",
colors="k",
levels=[-1, 0, 1],
alpha=0.5,
linestyles=["--", "-", "--"],
)
plt.scatter(
support_vectors[:, 0],
support_vectors[:, 1],
s=100,
linewidth=1,
facecolors="none",
edgecolors="k",
)
plt.title("C=" + str(C))
plt.tight_layout()
plt.show()
|
"""
=====================================
Plot the support vectors in LinearSVC
=====================================
Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide
the support vectors. This example demonstrates how to obtain the support
vectors in LinearSVC.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.svm import LinearSVC
X, y = make_blobs(n_samples=40, centers=2, random_state=0)
plt.figure(figsize=(10, 5))
for i, C in enumerate([1, 100]):
# "hinge" is the standard SVM loss
clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
# obtain the support vectors through the decision function
decision_function = clf.decision_function(X)
# we can also calculate the decision function manually
# decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0]
# The support vectors are the samples that lie within the margin
# boundaries, whose size is conventionally constrained to 1
support_vector_indices = np.where(np.abs(decision_function) <= 1 + 1e-15)[0]
support_vectors = X[support_vector_indices]
plt.subplot(1, 2, i + 1)
plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired)
ax = plt.gca()
DecisionBoundaryDisplay.from_estimator(
clf,
X,
ax=ax,
grid_resolution=50,
plot_method="contour",
colors="k",
levels=[-1, 0, 1],
alpha=0.5,
linestyles=["--", "-", "--"],
)
plt.scatter(
support_vectors[:, 0],
support_vectors[:, 1],
s=100,
linewidth=1,
facecolors="none",
edgecolors="k",
)
plt.title("C=" + str(C))
plt.tight_layout()
plt.show()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
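# Hedged usage note (added for illustration, not part of the original module):
# mixing in str means members compare equal to their plain string values and can
# be looked up from raw provider strings.
if __name__ == "__main__":
    assert ProviderName("github") is ProviderName.GITHUB  # lookup by value
    assert ProviderName.GITHUB == "github"                # str mixin equality
    assert ProviderName.GITHUB.value == "github"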
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
MEDIUM = "medium"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|