input | output
---|---
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
# An invalid arch name (extra suffix) should raise an AssertionError
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
# Test RegNet with arch dict
model = RegNet(arch)
model.train()
feat = model(imgs)
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
# An invalid arch name (extra suffix) should raise an AssertionError
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
# Test RegNet with arch dict
model = RegNet(arch)
model.init_weights()
model.train()
feat = model(imgs)
assert feat[0].shape == torch.Size([1, out_channels[0], 56, 56])
assert feat[1].shape == torch.Size([1, out_channels[1], 28, 28])
assert feat[2].shape == torch.Size([1, out_channels[2], 14, 14])
assert feat[3].shape == torch.Size([1, out_channels[3], 7, 7])
|
import pytest
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
@pytest.mark.skip('jinahub not available')
@pytest.mark.parametrize('uses', ['jinaai+docker://jina-ai/DummyExecutor'])
def test_container_pod(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', uses])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
import pytest
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
@pytest.mark.parametrize('uses', ['jinaai+docker://jina-ai/DummyExecutor'])
def test_container_pod(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', uses])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(
['glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.language_model)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py', ('cpu',
'cuda'))
])
def test_glip_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
# test custom_entities is True
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# test custom_entities is False
# packed_inputs = demo_mm_inputs(
# 2, [[3, 128, 128], [3, 125, 130]],
# texts=['a', 'b'],
# custom_entities=False)
# data = detector.data_preprocessor(packed_inputs, False)
# # Test forward test
# detector.eval()
# with torch.no_grad():
# batch_results = detector.forward(**data, mode='predict')
# self.assertEqual(len(batch_results), 2)
# self.assertIsInstance(batch_results[0], DetDataSample)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestGLIP(TestCase):
def setUp(self):
register_all_modules()
@parameterized.expand(
['glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py'])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
detector = MODELS.build(model)
self.assertTrue(detector.backbone)
self.assertTrue(detector.language_model)
self.assertTrue(detector.neck)
self.assertTrue(detector.bbox_head)
@parameterized.expand([
('glip/glip_atss_swin-t_a_fpn_dyhead_pretrain_obj365.py', ('cpu',
'cuda'))
])
def test_glip_forward_predict_mode(self, cfg_file, devices):
model = get_detector_cfg(cfg_file)
model.backbone.init_cfg = None
from mmdet.registry import MODELS
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
detector = MODELS.build(model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.cuda()
# test custom_entities is True
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=True)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
# test custom_entities is False
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
texts=['a', 'b'],
custom_entities=False)
data = detector.data_preprocessor(packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(**data, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
async def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = await Requests().post(url, headers=headers, json=payload)
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from typing import List
from pydantic import BaseModel
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
class ContentRetrievalSettings(BaseModel):
text: dict = SchemaField(
description="Text content settings",
default={"maxCharacters": 1000, "includeHtmlTags": False},
advanced=True,
)
highlights: dict = SchemaField(
description="Highlight settings",
default={
"numSentences": 3,
"highlightsPerUrl": 3,
"query": "",
},
advanced=True,
)
summary: dict = SchemaField(
description="Summary settings",
default={"query": ""},
advanced=True,
)
class ExaContentsBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
ids: List[str] = SchemaField(
description="Array of document IDs obtained from searches",
)
contents: ContentRetrievalSettings = SchemaField(
description="Content retrieval settings",
default=ContentRetrievalSettings(),
advanced=True,
)
class Output(BlockSchema):
results: list = SchemaField(
description="List of document contents",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="c52be83f-f8cd-4180-b243-af35f986b461",
description="Retrieves document contents using Exa's contents API",
categories={BlockCategory.SEARCH},
input_schema=ExaContentsBlock.Input,
output_schema=ExaContentsBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/contents"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"ids": input_data.ids,
"text": input_data.contents.text,
"highlights": input_data.contents.highlights,
"summary": input_data.contents.summary,
}
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean'
]
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
run_training_check = False if backend.backend() == "numpy" else True
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
run_training_check=run_training_check,
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not working in numpy"
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
def test_from_config(self):
pipeline = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
x = np.ones((2, 10, 12, 3))
output = pipeline(x)
restored = layers.Pipeline.from_config(pipeline.get_config())
restored_output = restored(x)
self.assertEqual(tuple(output.shape), (2, 8, 9, 3))
self.assertAllClose(output, restored_output)
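# Hedged usage sketch (not part of the test file): plain Pipeline usage outside the test
# harness, mirroring test_from_config above and assuming a channels_last image data format.
if __name__ == "__main__":
    pipe = layers.Pipeline([layers.AutoContrast(), layers.CenterCrop(8, 9)])
    images = np.ones((2, 10, 12, 3))
    print(pipe(images).shape)  # expected (2, 8, 9, 3) with channels_last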
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
run_training_check = False if backend.backend() == "numpy" else True
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
run_training_check=run_training_check,
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not working in numpy"
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
It consists of three scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
import gzip
import json
import os
from sentence_transformers import SentenceTransformer, util
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer("output/programming-model")
# Load the corpus
docs = []
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data["title"]
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit["corpus_id"]]
print("{:.2f}\t{}\t\t{}".format(hit["score"], doc[0], doc[1]))
print("\n=================\n")
|
"""
In this example we train a semantic search model to search through Wikipedia
articles about programming languages & technologies.
We use the text paragraphs from the following Wikipedia articles:
Assembly language, C , C Sharp , C++, Go , Java , JavaScript, Keras, Laravel, MATLAB, Matplotlib, MongoDB, MySQL, Natural Language Toolkit, NumPy, pandas (software), Perl, PHP, PostgreSQL, Python , PyTorch, R , React, Rust , Scala , scikit-learn, SciPy, Swift , TensorFlow, Vue.js
It consists of three scripts:
1_programming_query_generation.py - We generate queries for all paragraphs from these articles
2_programming_train_bi-encoder.py - We train a SentenceTransformer bi-encoder with these generated queries. This results in a model we can then use for semantic search (for the given Wikipedia articles).
3_programming_semantic_search.py - Shows how the trained model can be used for semantic search
"""
from sentence_transformers import SentenceTransformer, util
import gzip
import json
import os
# Load the model we trained in 2_programming_train_bi-encoder.py
model = SentenceTransformer("output/programming-model")
# Load the corpus
docs = []
corpus_filepath = "wiki-programmming-20210101.jsonl.gz"
if not os.path.exists(corpus_filepath):
util.http_get("https://sbert.net/datasets/wiki-programmming-20210101.jsonl.gz", corpus_filepath)
with gzip.open(corpus_filepath, "rt") as fIn:
for line in fIn:
data = json.loads(line.strip())
title = data["title"]
for p in data["paragraphs"]:
if len(p) > 100: # Only take paragraphs with at least 100 chars
docs.append((title, p))
paragraph_emb = model.encode([d[1] for d in docs], convert_to_tensor=True)
print("Available Wikipedia Articles:")
print(", ".join(sorted(list(set([d[0] for d in docs])))))
# Example for semantic search
while True:
query = input("Query: ")
query_emb = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_emb, paragraph_emb, top_k=3)[0]
for hit in hits:
doc = docs[hit["corpus_id"]]
print("{:.2f}\t{}\t\t{}".format(hit["score"], doc[0], doc[1]))
print("\n=================\n")
|
"""Test Fireworks API wrapper.
In order to run this test, you need a Fireworks API key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
"""
import pytest as pytest
from langchain_fireworks import Fireworks
_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
def test_fireworks_call() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "fireworks"
assert isinstance(output, str)
assert len(output) > 0
async def test_fireworks_acall() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "fireworks"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.invoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
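# Hedged sketch (not part of the original test file): these tests require the
# FIREWORKS_API_KEY environment variable, so a marker like the one below could be used to
# skip them cleanly when the key is absent. The marker name is an assumption.
import os
requires_fireworks_key = pytest.mark.skipif(
    "FIREWORKS_API_KEY" not in os.environ,
    reason="FIREWORKS_API_KEY environment variable is not set",
)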
|
"""Test Fireworks API wrapper.
In order to run this test, you need a Fireworks API key.
You can get it by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
"""
import pytest as pytest
from langchain_fireworks import Fireworks
_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
def test_fireworks_call() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "fireworks"
assert isinstance(output, str)
assert len(output) > 0
async def test_fireworks_acall() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "fireworks"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
|
from typing import Any # noqa: F401
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
@_register_proto(proto_type_name='torch_embedding')
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
def new_empty(self, *args, **kwargs):
"""
This method enables deepcopy of TorchEmbedding by returning another instance of this subclass.
If it is not implemented, deepcopy will throw a RuntimeError from Torch.
"""
return self.__class__(TorchTensor.new_empty(self, *args, **kwargs))
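# Hedged usage sketch (not part of the original module): illustrates why new_empty is
# overridden. Assumes torch is installed and pydantic v1-style parsing, as used by docarray.
if __name__ == '__main__':
    import copy
    import torch
    from pydantic import parse_obj_as
    emb = parse_obj_as(TorchEmbedding, torch.zeros(128))
    # Without the new_empty override, deepcopy of this torch.Tensor subclass raises a
    # RuntimeError inside torch; with it, a proper TorchEmbedding copy comes back.
    emb_copy = copy.deepcopy(emb)
    assert isinstance(emb_copy, TorchEmbedding)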
|
from typing import Any # noqa: F401
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.torch_tensor import TorchTensor
torch_base = type(TorchTensor) # type: Any
embedding_base = type(EmbeddingMixin) # type: Any
class metaTorchAndEmbedding(torch_base, embedding_base):
pass
@_register_proto(proto_type_name='torch_embedding')
class TorchEmbedding(TorchTensor, EmbeddingMixin, metaclass=metaTorchAndEmbedding):
alternative_type = TorchTensor
|
from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
from docarray.proto import NodeProto
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf.
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file path.
"""
scheme = parts['scheme']
if scheme is None:
pass # allow missing scheme, unlike pydantic
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
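# Hedged usage sketch (not part of the original module): shows the relaxed validation that
# validate_parts enables. The paths/URLs below are hypothetical; assumes pydantic v1
# semantics, as used by docarray here.
if __name__ == '__main__':
    # A plain local path has no scheme; pydantic's stock AnyUrl would reject it,
    # but this subclass deliberately allows it so file paths can be stored as URLs.
    local = parse_obj_as(AnyUrl, 'data/images/example.png')
    remote = parse_obj_as(AnyUrl, 'https://example.com/image.png')
    print(local, remote)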
|
from typing import TYPE_CHECKING, Type, TypeVar
from pydantic import AnyUrl as BaseAnyUrl
from pydantic import errors, parse_obj_as
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.networks import Parts
T = TypeVar('T', bound='AnyUrl')
class AnyUrl(BaseAnyUrl, AbstractType):
host_required = (
False # turn off host requirement to allow passing of local paths as URL
)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf.
:return: the nested item protobuf message
"""
return NodeProto(any_url=str(self))
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Our URLs should be able to function both in local and remote settings.
Therefore, we allow missing `scheme`, making it possible to pass a file path.
"""
scheme = parts['scheme']
if scheme is None:
pass # allow missing scheme, unlike pydantic
elif cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
read url from a proto msg
:param pb_msg:
:return: url
"""
return parse_obj_as(cls, pb_msg)
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.huggingface import HuggingFaceLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.huggingface import HuggingFaceInferenceAPI, HuggingFaceLLM
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in HuggingFaceInferenceAPI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
names_of_base_classes = [b.__name__ for b in HuggingFaceLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
import numpy as np
from docarray import Image
from docarray.typing import Tensor
def test_image():
image = Image(uri='http://jina.ai')
image.tensor = image.uri.load()
assert isinstance(image.tensor, np.ndarray)
|
from docarray import Image
from docarray.typing import Tensor
def test_image():
image = Image(uri='http://jina.ai')
image.tensor = image.uri.load()
assert isinstance(image.tensor, Tensor)
|
from llama_index_instrumentation.span_handlers.base import BaseSpanHandler, T # noqa
|
import inspect
import threading
from abc import abstractmethod
from typing import Any, Dict, List, Generic, Optional, TypeVar
from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr, ConfigDict
from llama_index.core.instrumentation.span.base import BaseSpan
T = TypeVar("T", bound=BaseSpan)
class BaseSpanHandler(BaseModel, Generic[T]):
model_config = ConfigDict(arbitrary_types_allowed=True)
open_spans: Dict[str, T] = Field(
default_factory=dict, description="Dictionary of open spans."
)
completed_spans: List[T] = Field(
default_factory=list, description="List of completed spans."
)
dropped_spans: List[T] = Field(
default_factory=list, description="List of completed spans."
)
current_span_ids: Dict[Any, Optional[str]] = Field(
default={}, description="Id of current spans in a given thread."
)
_lock: Optional[threading.Lock] = PrivateAttr()
def __init__(
self,
open_spans: Dict[str, T] = {},
completed_spans: List[T] = [],
dropped_spans: List[T] = [],
current_span_ids: Dict[Any, str] = {},
):
super().__init__(
open_spans=open_spans,
completed_spans=completed_spans,
dropped_spans=dropped_spans,
current_span_ids=current_span_ids,
)
self._lock = None
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "BaseSpanHandler"
@property
def lock(self) -> threading.Lock:
if self._lock is None:
self._lock = threading.Lock()
return self._lock
def span_enter(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
"""Logic for entering a span."""
if id_ in self.open_spans:
pass # should probably raise an error here
else:
span = self.new_span(
id_=id_,
bound_args=bound_args,
instance=instance,
parent_span_id=parent_id,
tags=tags,
)
if span:
with self.lock:
self.open_spans[id_] = span
def span_exit(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> None:
"""Logic for exiting a span."""
span = self.prepare_to_exit_span(
id_=id_, bound_args=bound_args, instance=instance, result=result
)
if span:
with self.lock:
del self.open_spans[id_]
def span_drop(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> None:
"""Logic for dropping a span i.e. early exit."""
span = self.prepare_to_drop_span(
id_=id_, bound_args=bound_args, instance=instance, err=err
)
if span:
with self.lock:
del self.open_spans[id_]
@abstractmethod
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Create a span.
Subclasses of BaseSpanHandler should create the respective span type T
and return it. Only NullSpanHandler should return a None here.
"""
...
@abstractmethod
def prepare_to_exit_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Logic for preparing to exit a span.
Subclasses of BaseSpanHandler should return back the specific span T
that is to be exited. If None is returned, then the span won't actually
be exited.
"""
...
@abstractmethod
def prepare_to_drop_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> Optional[T]:
"""
Logic for preparing to drop a span.
Subclasses of BaseSpanHandler should return back the specific span T
that is to be dropped. If None is returned, then the span won't actually
be dropped.
"""
...
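# Hedged sketch (not from the original module): a minimal concrete handler showing how the
# abstract hooks above fit together. It assumes BaseSpan exposes id_ and parent_id fields.
class SimpleSpanHandler(BaseSpanHandler[BaseSpan]):
    @classmethod
    def class_name(cls) -> str:
        return "SimpleSpanHandler"
    def new_span(self, id_, bound_args, instance=None, parent_span_id=None, tags=None, **kwargs):
        # Returning the span lets span_enter register it in open_spans.
        return BaseSpan(id_=id_, parent_id=parent_span_id)
    def prepare_to_exit_span(self, id_, bound_args, instance=None, result=None, **kwargs):
        # Move the finished span to completed_spans; span_exit then removes it from open_spans.
        span = self.open_spans.get(id_)
        if span is not None:
            self.completed_spans.append(span)
        return span
    def prepare_to_drop_span(self, id_, bound_args, instance=None, err=None, **kwargs):
        # Record the aborted span; span_drop then removes it from open_spans.
        span = self.open_spans.get(id_)
        if span is not None:
            self.dropped_spans.append(span)
        return span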
|
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a JSON, a binary file or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. Note that a CSV file has very limited
compatibility; a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to save data into a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
"""
if file_format == 'json':
self.save_json(file, encoding=encoding)
elif file_format == 'binary':
self.save_binary(file)
elif file_format == 'csv':
self.save_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
@classmethod
def load(
cls: Type['T'],
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON or a binary file, or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. A CSV file has very limited compatibility;
a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to load data from a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
:return: the loaded DocumentArray object
"""
if file_format == 'json':
return cls.load_json(file, encoding=encoding, **kwargs)
elif file_format == 'binary':
return cls.load_binary(file)
elif file_format == 'csv':
return cls.load_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
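# Hedged usage sketch (not part of the mixin): how the save/load pair above is typically
# used through docarray's DocumentArray, which mixes in CommonIOMixin. Filenames are
# hypothetical.
if __name__ == '__main__':
    from docarray import Document, DocumentArray
    da = DocumentArray([Document(text='hello'), Document(text='world')])
    da.save('docs.bin', file_format='binary')  # compact binary round-trip
    restored = DocumentArray.load('docs.bin', file_format='binary')
    assert len(restored) == 2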
|
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from ....typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a JSON, a binary file or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. Note that a CSV file has very limited
compatibility; a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to save data into a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
"""
if file_format == 'json':
self.save_json(file, encoding=encoding)
elif file_format == 'binary':
self.save_binary(file)
elif file_format == 'csv':
self.save_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
@classmethod
def load(
cls: Type['T'],
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON or a binary file, or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. A CSV file has very limited compatibility;
a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to load data from a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
:return: the loaded DocumentArray object
"""
if file_format == 'json':
return cls.load_json(file, encoding=encoding, **kwargs)
elif file_format == 'binary':
return cls.load_binary(file)
elif file_format == 'csv':
return cls.load_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
import numpy as np
from sentence_transformers.cross_encoder import CrossEncoder
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
"""
This example computes the score between a query and all possible
sentences in a corpus using a Cross-Encoder for semantic textual similarity (STS).
It then outputs the most similar sentences for the given query.
"""
from sentence_transformers.cross_encoder import CrossEncoder
import numpy as np
# Pre-trained cross encoder
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
# We want to compute the similarity between the query sentence
query = "A man is eating pasta."
# With all sentences in the corpus
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# 1. We rank all sentences in the corpus for the query
ranks = model.rank(query, corpus)
# Print the scores
print("Query:", query)
for rank in ranks:
print(f"{rank['score']:.2f}\t{corpus[rank['corpus_id']]}")
# 2. Alternatively, you can also manually compute the score between two sentences
sentence_combinations = [[query, sentence] for sentence in corpus]
scores = model.predict(sentence_combinations)
# Sort the scores in decreasing order to get the corpus indices
ranked_indices = np.argsort(scores)[::-1]
print("scores:", scores)
print("indices:", ranked_indices)
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
cp = import_cupy()
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent number of categories between"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
with pytest.raises(ValueError, match="Inconsistent number of categories between"):
QuantileDMatrix(it, enable_categorical=True)
with pytest.raises(ValueError, match="Inconsistent feature types"):
DMatrix(it, enable_categorical=True)
def check_uneven_sizes(device: str) -> None:
"""Tests for having irregular data shapes."""
batches = [
tm.make_regression(n_samples, 16, use_cupy=device == "cuda")
for n_samples in [512, 256, 1024]
]
unzip = list(zip(*batches))
it = tm.IteratorForTest(unzip[0], unzip[1], None, cache="cache", on_host=True)
Xy = DMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
Xy = ExtMemQuantileDMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
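# Hedged usage sketch (not part of the original module): feeding CatIter into a
# QuantileDMatrix on CPU. The argument values below are illustrative only.
if __name__ == "__main__":
    it = CatIter(
        n_samples_per_batch=64,
        n_features=8,
        n_batches=4,
        n_cats=5,
        sparsity=0.0,
        cat_ratio=0.5,
        onehot=False,
        device="cpu",
        cache=None,
    )
    Xy = QuantileDMatrix(it, enable_categorical=True)
    assert Xy.num_row() == 64 * 4 and Xy.num_col() == 8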
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..compat import import_cupy
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
cp = import_cupy()
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
def check_uneven_sizes(device: str) -> None:
"""Tests for having irregular data shapes."""
batches = [
tm.make_regression(n_samples, 16, use_cupy=device == "cuda")
for n_samples in [512, 256, 1024]
]
unzip = list(zip(*batches))
it = tm.IteratorForTest(unzip[0], unzip[1], None, cache="cache", on_host=True)
Xy = DMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
Xy = ExtMemQuantileDMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf
import cupy
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.ops import operation_utils
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into the given shape.
Args:
target_shape: Target shape. Tuple of integers, does not include the
samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be
known/fixed. Use the keyword argument `input_shape` (tuple of integers,
does not include the samples/batch size axis) when using this layer as
the first layer in a model.
Output shape:
`(batch_size, *target_shape)`
Example:
>>> x = keras.Input(shape=(12,))
>>> y = keras.layers.Reshape((3, 4))(x)
>>> y.shape
(None, 3, 4)
>>> # also supports shape inference using `-1` as dimension
>>> y = keras.layers.Reshape((-1, 2, 2))(x)
>>> y.shape
(None, 3, 2, 2)
"""
def __init__(self, target_shape, **kwargs):
super().__init__(**kwargs)
self.target_shape = tuple(target_shape)
def compute_output_shape(self, input_shape):
return (
input_shape[0],
*operation_utils.compute_reshape_output_shape(
input_shape[1:], self.target_shape, "target_shape"
),
)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def build(self, input_shape):
sample_output_shape = operation_utils.compute_reshape_output_shape(
input_shape[1:], self.target_shape, "target_shape"
)
self._resolved_target_shape = tuple(
-1 if d is None else d for d in sample_output_shape
)
def call(self, inputs):
return ops.reshape(
inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape
)
def get_config(self):
config = {"target_shape": self.target_shape}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.ops import operation_utils
@keras_export("keras.layers.Reshape")
class Reshape(Layer):
"""Layer that reshapes inputs into the given shape.
Args:
target_shape: Target shape. Tuple of integers, does not include the
samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be
known/fixed. Use the keyword argument `input_shape` (tuple of integers,
does not include the samples/batch size axis) when using this layer as
the first layer in a model.
Output shape:
`(batch_size, *target_shape)`
Example:
>>> x = keras.Input(shape=(12,))
>>> y = keras.layers.Reshape((3, 4))(x)
>>> y.shape
(None, 3, 4)
>>> # also supports shape inference using `-1` as dimension
>>> y = keras.layers.Reshape((-1, 2, 2))(x)
>>> y.shape
(None, 3, 2, 2)
"""
def __init__(self, target_shape, **kwargs):
super().__init__(**kwargs)
self.target_shape = tuple(target_shape)
def compute_output_shape(self, input_shape):
return (
input_shape[0],
*operation_utils.compute_reshape_output_shape(
input_shape[1:], self.target_shape, "target_shape"
),
)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def build(self, input_shape):
sample_output_shape = operation_utils.compute_reshape_output_shape(
input_shape[1:], self.target_shape, "target_shape"
)
self._resolved_target_shape = tuple(
-1 if d is None else d for d in sample_output_shape
)
self.built = True
def call(self, inputs):
return ops.reshape(
inputs, (ops.shape(inputs)[0],) + self._resolved_target_shape
)
def get_config(self):
config = {"target_shape": self.target_shape}
base_config = super().get_config()
return {**base_config, **config}
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
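# --- Hedged sketch (plain PyTorch, not the mmdet API) of the training pattern that the
# `loss` method above implements: the teacher only produces a label assignment under
# `torch.no_grad()`, and the student's loss is computed against that assignment.
# `assign_fn` and `loss_fn` are placeholders for the bbox head's assignment and loss.
import torch
def lad_style_step(student, teacher, assign_fn, loss_fn, images, targets):
    with torch.no_grad():  # the teacher is never updated here
        teacher_outs = teacher(images)
        label_assignment = assign_fn(teacher_outs, targets)
    student_outs = student(images)  # student forward pass
    return loss_fn(student_outs, label_assignment, targets)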
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from ..utils.misc import unpack_gt_instances
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
outputs = unpack_gt_instances(batch_data_samples)
batch_gt_instances, batch_gt_instances_ignore, batch_img_metas \
= outputs
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.loss(x, label_assignment_results,
batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
return losses
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
        >>> print(f"Si-SNR score is : {score}.")
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet* [:footcite:`Luo_2019`] trained on
*Libri2Mix dataset* [:footcite:`cosentino2020librimix`].
The source separation model is constructed by :py:func:`torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB_PLUS = SourceSeparationBundle(
_model_path="models/hdemucs_high_trained.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB_PLUS.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation trained on MUSDB-HQ [:footcite:`MUSDB18HQ`] and additional internal training data.
The model is constructed by :py:func:`torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
_model_path="models/hdemucs_high_musdbhq_only.pt",
_model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
_sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained *Hybrid Demucs* [:footcite:`defossez2021hybrid`] pipeline for music
source separation trained on MUSDB-HQ [:footcite:`MUSDB18HQ`].
The model is constructed by :py:func:`torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
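# --- Hedged usage sketch for the CONVTASNET_BASE_LIBRI2MIX bundle defined above: a
# random tensor stands in for a real 8 kHz mono mixture, so the separated output is
# meaningless but the shapes are representative.
import torch
bundle = CONVTASNET_BASE_LIBRI2MIX
model = bundle.get_model()
mixture = torch.randn(1, 1, bundle.sample_rate * 4)  # 4 seconds of fake 8 kHz audio
with torch.no_grad():
    separated = model(mixture)  # (batch, num_sources=2, time)
print(separated.shape)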
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
        >>> print(f"Si-SNR score is : {score}.")
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained Source Separation pipeline with *ConvTasNet* [:footcite:`Luo_2019`] trained on
*Libri2Mix dataset* [:footcite:`cosentino2020librimix`].
The source separation model is constructed by :py:func:`torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.msword import MsWordParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MsWordParser": "langchain_community.document_loaders.parsers.msword",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MsWordParser",
]
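# --- Hedged sketch of what the lookup above enables: importing MsWordParser from the
# legacy langchain path still resolves to the langchain_community implementation, with
# a deprecation warning depending on installed versions. The exact legacy module path
# is an assumption based on the DEPRECATED_LOOKUP mapping.
from langchain.document_loaders.parsers.msword import MsWordParser
parser = MsWordParser()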
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.msword import MsWordParser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MsWordParser": "langchain_community.document_loaders.parsers.msword"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MsWordParser",
]
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
image_size = (640, 640)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
model = dict(
data_preprocessor=dict(pad_size_divisor=64, batch_augments=batch_augments),
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
allow_negative_crop=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# learning policy
max_epochs = 50
train_cfg = dict(max_epochs=max_epochs, val_interval=2)
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[30, 40],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True),
clip_grad=None)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
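# --- Hedged sketch: loading the config above with mmengine and checking a few resolved
# fields. The file path is a placeholder for wherever this config is saved.
from mmengine.config import Config
cfg = Config.fromfile('configs/strong_baselines/mask-rcnn_r50_fpn_640.py')  # placeholder
print(cfg.optim_wrapper.optimizer.lr)  # 0.08
print(cfg.train_cfg.max_epochs)  # 50
print(cfg.auto_scale_lr.base_batch_size)  # 64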
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(norm_cfg=norm_cfg, norm_eval=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
norm_cfg=norm_cfg,
num_outs=5),
roi_head=dict(
bbox_head=dict(norm_cfg=norm_cfg), mask_head=dict(norm_cfg=norm_cfg)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(max_epochs=50)
evaluation = dict(interval=2)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
import json
import re
from re import Pattern
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
raise ValueError("action not found")
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception as exc:
if not includes_answer:
raise OutputParserException(
f"Could not parse LLM output: {text}"
) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
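# --- Hedged sketch exercising ChatOutputParser on a typical agent reply that wraps its
# action in a fenced JSON block; the tool name and input below are made up.
sample = (
    "Thought: I should use the search tool.\n"
    "```json\n"
    '{"action": "search", "action_input": "current python version"}\n'
    "```"
)
result = ChatOutputParser().parse(sample)
print(result.tool, result.tool_input)  # search current python version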
|
import json
import re
from typing import Pattern, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
from langchain.agents.chat.prompt import FORMAT_INSTRUCTIONS
FINAL_ANSWER_ACTION = "Final Answer:"
class ChatOutputParser(AgentOutputParser):
"""Output parser for the chat agent."""
format_instructions: str = FORMAT_INSTRUCTIONS
"""Default formatting instructions"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
def get_format_instructions(self) -> str:
"""Returns formatting instructions for the given output parser."""
return self.format_instructions
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
"""Parse the output from the agent into
an AgentAction or AgentFinish object.
Args:
text: The text to parse.
Returns:
An AgentAction or AgentFinish object.
Raises:
OutputParserException: If the output could not be parsed.
ValueError: If the action could not be found.
"""
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
raise ValueError("action not found")
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
raise OutputParserException(
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
return AgentAction(
response["action"], response.get("action_input", {}), text
)
except Exception as exc:
if not includes_answer:
raise OutputParserException(
f"Could not parse LLM output: {text}"
) from exc
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "chat"
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
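# --- Hedged sketch only: the TODOs above note that SparseEncoder is still incomplete,
# so treat this as the intended composition pattern rather than a verified example. The
# checkpoint name is a placeholder.
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
mlm = MLMTransformer("naver/splade-cocondenser-ensembledistil")  # placeholder checkpoint
pooling = SpladePooling(pooling_strategy="max")
model = SparseEncoder(modules=[mlm, pooling])
embeddings = model.encode(["An example sentence for sparse encoding."])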
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which losses to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
"""Gemini embeddings file."""
import deprecated
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as gemini
@deprecated.deprecated(
reason=(
"Should use `llama-index-embeddings-google-genai` instead, using Google's latest unified SDK. "
"See: https://docs.llamaindex.ai/en/stable/examples/embeddings/google_genai/"
)
)
class GeminiEmbedding(BaseEmbedding):
"""Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(api_key=api_key)
self._model = gemini
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
for text in texts
]
### Async methods ###
# need to wait async calls from Gemini side to be implemented.
# Issue: https://github.com/google/generative-ai-python/issues/125
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._get_text_embedding(text)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
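# --- Hedged usage sketch for the (deprecated) GeminiEmbedding wrapper above. The API
# key is a placeholder and must be a valid Google Generative AI key.
embed_model = GeminiEmbedding(api_key="YOUR_API_KEY", model_name="models/embedding-001")
vector = embed_model.get_text_embedding("Google Gemini embeddings.")
print(len(vector))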
|
"""Gemini embeddings file."""
from typing import Any, List, Optional
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
BaseEmbedding,
)
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.callbacks.base import CallbackManager
import google.generativeai as gemini
class GeminiEmbedding(BaseEmbedding):
"""Google Gemini embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
title: Optional[str] = Field(
default="",
description="Title is only applicable for retrieval_document tasks, and is used to represent a document title. For other tasks, title is invalid.",
)
task_type: Optional[str] = Field(
default="retrieval_document",
description="The task for embedding model.",
)
def __init__(
self,
model_name: str = "models/embedding-001",
task_type: Optional[str] = "retrieval_document",
api_key: Optional[str] = None,
title: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
title=title,
task_type=task_type,
**kwargs,
)
gemini.configure(api_key=api_key)
self._model = gemini
@classmethod
def class_name(cls) -> str:
return "GeminiEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.embed_content(
model=self.model_name,
content=query,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return [
self._model.embed_content(
model=self.model_name,
content=text,
title=self.title,
task_type=self.task_type,
)["embedding"]
for text in texts
]
### Async methods ###
# need to wait async calls from Gemini side to be implemented.
# Issue: https://github.com/google/generative-ai-python/issues/125
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return self._get_query_embedding(query)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._get_text_embedding(text)
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return self._get_text_embeddings(texts)
|
import gzip
from os import PathLike
from pathlib import Path
from typing import Union
import pytest
import yaml
from vcr import VCR
from vcr.persisters.filesystem import CassetteNotFoundError
from vcr.request import Request
class CustomSerializer:
"""Custom serializer for VCR cassettes using YAML and gzip.
We're using a custom serializer to avoid the default yaml serializer
used by VCR, which is not designed to be safe for untrusted input.
This step is an extra precaution necessary because the cassette files
are in compressed YAML format, which makes it more difficult to inspect
their contents during development or debugging.
"""
@staticmethod
def serialize(cassette_dict: dict) -> bytes:
"""Convert cassette to YAML and compress it."""
cassette_dict["requests"] = [
request._to_dict() for request in cassette_dict["requests"]
]
yml = yaml.safe_dump(cassette_dict)
return gzip.compress(yml.encode("utf-8"))
@staticmethod
def deserialize(data: bytes) -> dict:
"""Decompress data and convert it from YAML."""
text = gzip.decompress(data).decode("utf-8")
cassette = yaml.safe_load(text)
cassette["requests"] = [
Request._from_dict(request) for request in cassette["requests"]
]
return cassette
class CustomPersister:
"""A custom persister for VCR that uses the CustomSerializer."""
@classmethod
def load_cassette(
cls, cassette_path: Union[str, PathLike[str]], serializer: CustomSerializer
) -> tuple[dict, dict]:
"""Load a cassette from a file."""
# If cassette path is already Path this is a no-op
cassette_path = Path(cassette_path)
if not cassette_path.is_file():
raise CassetteNotFoundError(
f"Cassette file {cassette_path} does not exist."
)
with cassette_path.open(mode="rb") as f:
data = f.read()
deser = serializer.deserialize(data)
return deser["requests"], deser["responses"]
@staticmethod
def save_cassette(
cassette_path: Union[str, PathLike[str]],
cassette_dict: dict,
serializer: CustomSerializer,
) -> None:
"""Save a cassette to a file."""
data = serializer.serialize(cassette_dict)
        # If cassette path is already Path this is a no-op
cassette_path = Path(cassette_path)
cassette_folder = cassette_path.parent
if not cassette_folder.exists():
cassette_folder.mkdir(parents=True)
with cassette_path.open("wb") as f:
f.write(data)
# A list of headers that should be filtered out of the cassettes.
# These are typically associated with sensitive information and should
# not be stored in cassettes.
_BASE_FILTER_HEADERS = [
("authorization", "PLACEHOLDER"),
("x-api-key", "PLACEHOLDER"),
("api-key", "PLACEHOLDER"),
]
@pytest.fixture(scope="session")
def _base_vcr_config() -> dict:
"""Configuration that every cassette will receive.
(Anything permitted by vcr.VCR(**kwargs) can be put here.)
"""
return {
"record_mode": "once",
"filter_headers": _BASE_FILTER_HEADERS.copy(),
"match_on": ["method", "uri", "body"],
"allow_playback_repeats": True,
"decode_compressed_response": True,
"cassette_library_dir": "tests/cassettes",
"path_transformer": VCR.ensure_suffix(".yaml"),
}
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict:
return _base_vcr_config
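# --- Hedged sketch: one way a test module could wire CustomPersister and
# CustomSerializer into VCR directly; the cassette name and the "yaml.gz" serializer
# label are assumptions, and the fixtures above are consumed by the pytest VCR plugin
# separately.
import vcr
my_vcr = vcr.VCR(cassette_library_dir="tests/cassettes")
my_vcr.register_persister(CustomPersister)
my_vcr.register_serializer("yaml.gz", CustomSerializer)
with my_vcr.use_cassette("example.yaml.gz", serializer="yaml.gz"):
    pass  # HTTP calls made here are recorded to or replayed from the cassette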
|
import base64
import gzip
import pytest
from vcr import VCR # type: ignore[import-untyped]
from vcr.serializers import yamlserializer # type: ignore[import-untyped]
class YamlGzipSerializer:
@staticmethod
def serialize(cassette_dict: dict) -> str:
raw = yamlserializer.serialize(cassette_dict).encode("utf-8")
compressed = gzip.compress(raw)
return base64.b64encode(compressed).decode("ascii")
@staticmethod
def deserialize(data: str) -> dict:
compressed = base64.b64decode(data.encode("ascii"))
text = gzip.decompress(compressed).decode("utf-8")
return yamlserializer.deserialize(text)
_BASE_FILTER_HEADERS = [
("authorization", "PLACEHOLDER"),
("x-api-key", "PLACEHOLDER"),
("api-key", "PLACEHOLDER"),
]
@pytest.fixture(scope="session")
def _base_vcr_config() -> dict:
"""
Configuration that every cassette will receive.
(Anything permitted by vcr.VCR(**kwargs) can be put here.)
"""
return {
"record_mode": "once",
"filter_headers": _BASE_FILTER_HEADERS.copy(),
"match_on": ["method", "scheme", "host", "port", "path", "query"],
"decode_compressed_response": True,
"cassette_library_dir": "tests/cassettes",
"path_transformer": VCR.ensure_suffix(".yaml"),
}
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict:
return _base_vcr_config
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, TransformersTokenizerWrapper, WordTokenizer
__all__ = [
"WordTokenizer",
"WhitespaceTokenizer",
"PhraseTokenizer",
"ENGLISH_STOP_WORDS",
"TransformersTokenizerWrapper",
]
|
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from collections import OrderedDict
from typing import Any, Dict, Optional, Type, cast
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.response_synthesizers import Refine
from llama_index.core.response_synthesizers.refine import StructuredRefineResponse
from llama_index.core.types import BasePydanticProgram
class MockRefineProgram(BasePydanticProgram):
"""
    Mock program that echoes the provided context string as the answer and looks up
    ``query_satisfied`` from the mapping passed to the constructor, so the tests can
    control which inputs are treated as satisfying the query.
"""
def __init__(self, input_to_query_satisfied: Dict[str, bool]):
self._input_to_query_satisfied = input_to_query_satisfied
@property
def output_cls(self) -> Type[BaseModel]:
return StructuredRefineResponse
def __call__(
self,
*args: Any,
context_str: Optional[str] = None,
context_msg: Optional[str] = None,
**kwargs: Any,
) -> StructuredRefineResponse:
input_str = context_str or context_msg
input_str = cast(str, input_str)
query_satisfied = self._input_to_query_satisfied[input_str]
return StructuredRefineResponse(
answer=input_str, query_satisfied=query_satisfied
)
async def acall(
self,
*args: Any,
context_str: Optional[str] = None,
context_msg: Optional[str] = None,
**kwargs: Any,
) -> StructuredRefineResponse:
input_str = context_str or context_msg
input_str = cast(str, input_str)
query_satisfied = self._input_to_query_satisfied[input_str]
return StructuredRefineResponse(
answer=input_str, query_satisfied=query_satisfied
)
@pytest.fixture()
def refine_instance() -> Refine:
return Refine(
streaming=False,
verbose=True,
structured_answer_filtering=True,
)
def test_constructor_args() -> None:
with pytest.raises(ValueError):
# can't construct refine with both streaming and answer filtering
Refine(
streaming=True,
structured_answer_filtering=True,
)
with pytest.raises(ValueError):
# can't construct refine with a program factory but not answer filtering
Refine(
program_factory=lambda _: MockRefineProgram({}),
structured_answer_filtering=False,
)
@pytest.mark.asyncio
async def test_answer_filtering_one_answer() -> None:
input_to_query_satisfied = OrderedDict(
[
("input1", False),
("input2", True),
("input3", False),
]
)
def program_factory(*args: Any, **kwargs: Any) -> MockRefineProgram:
return MockRefineProgram(input_to_query_satisfied)
refine_instance = Refine(
structured_answer_filtering=True,
program_factory=program_factory,
)
res = await refine_instance.aget_response(
"question", list(input_to_query_satisfied.keys())
)
assert res == "input2"
@pytest.mark.asyncio
async def test_answer_filtering_no_answers() -> None:
input_to_query_satisfied = OrderedDict(
[
("input1", False),
("input2", False),
("input3", False),
]
)
def program_factory(*args: Any, **kwargs: Any) -> MockRefineProgram:
return MockRefineProgram(input_to_query_satisfied)
refine_instance = Refine(
structured_answer_filtering=True,
program_factory=program_factory,
)
res = await refine_instance.aget_response(
"question", list(input_to_query_satisfied.keys())
)
assert res == "Empty Response"
|
from collections import OrderedDict
from typing import Any, Dict, Optional, Type, cast
import pytest
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.response_synthesizers import Refine
from llama_index.core.response_synthesizers.refine import StructuredRefineResponse
from llama_index.core.types import BasePydanticProgram
class MockRefineProgram(BasePydanticProgram):
"""
    Mock program that echoes the provided context string as the answer and looks up
    ``query_satisfied`` from the mapping passed to the constructor, so the tests can
    control which inputs are treated as satisfying the query.
"""
def __init__(self, input_to_query_satisfied: Dict[str, bool]):
self._input_to_query_satisfied = input_to_query_satisfied
@property
def output_cls(self) -> Type[BaseModel]:
return StructuredRefineResponse
def __call__(
self,
*args: Any,
context_str: Optional[str] = None,
context_msg: Optional[str] = None,
**kwargs: Any,
) -> StructuredRefineResponse:
input_str = context_str or context_msg
input_str = cast(str, input_str)
query_satisfied = self._input_to_query_satisfied[input_str]
return StructuredRefineResponse(
answer=input_str, query_satisfied=query_satisfied
)
async def acall(
self,
*args: Any,
context_str: Optional[str] = None,
context_msg: Optional[str] = None,
**kwargs: Any,
) -> StructuredRefineResponse:
input_str = context_str or context_msg
input_str = cast(str, input_str)
query_satisfied = self._input_to_query_satisfied[input_str]
return StructuredRefineResponse(
answer=input_str, query_satisfied=query_satisfied
)
@pytest.fixture()
def refine_instance() -> Refine:
return Refine(
streaming=False,
verbose=True,
structured_answer_filtering=True,
)
def test_constructor_args() -> None:
with pytest.raises(ValueError):
# can't construct refine with both streaming and answer filtering
Refine(
streaming=True,
structured_answer_filtering=True,
)
with pytest.raises(ValueError):
# can't construct refine with a program factory but not answer filtering
Refine(
program_factory=lambda _: MockRefineProgram({}),
structured_answer_filtering=False,
)
@pytest.mark.asyncio()
async def test_answer_filtering_one_answer() -> None:
input_to_query_satisfied = OrderedDict(
[
("input1", False),
("input2", True),
("input3", False),
]
)
def program_factory(*args: Any, **kwargs: Any) -> MockRefineProgram:
return MockRefineProgram(input_to_query_satisfied)
refine_instance = Refine(
structured_answer_filtering=True,
program_factory=program_factory,
)
res = await refine_instance.aget_response(
"question", list(input_to_query_satisfied.keys())
)
assert res == "input2"
@pytest.mark.asyncio()
async def test_answer_filtering_no_answers() -> None:
input_to_query_satisfied = OrderedDict(
[
("input1", False),
("input2", False),
("input3", False),
]
)
def program_factory(*args: Any, **kwargs: Any) -> MockRefineProgram:
return MockRefineProgram(input_to_query_satisfied)
refine_instance = Refine(
structured_answer_filtering=True,
program_factory=program_factory,
)
res = await refine_instance.aget_response(
"question", list(input_to_query_satisfied.keys())
)
assert res == "Empty Response"
|
from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_selector import (
BaseSelector,
SelectorResult,
SingleSelection,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import QueryBundle
from llama_index.core.settings import Settings
from llama_index.core.tools.types import ToolMetadata
class EmbeddingSingleSelector(BaseSelector):
"""
Embedding selector.
Embedding selector that chooses one out of many options.
Args:
embed_model (BaseEmbedding): An embedding model.
"""
def __init__(
self,
embed_model: BaseEmbedding,
) -> None:
self._embed_model = embed_model
@classmethod
def from_defaults(
cls,
embed_model: Optional[BaseEmbedding] = None,
) -> "EmbeddingSingleSelector":
# optionally initialize defaults
embed_model = embed_model or Settings.embed_model
# construct prompt
return cls(embed_model)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
query_embedding = self._embed_model.get_query_embedding(query.query_str)
text_embeddings = [
self._embed_model.get_text_embedding(choice.description)
for choice in choices
]
top_similarities, top_ids = get_top_k_embeddings(
query_embedding,
text_embeddings,
similarity_top_k=1,
embedding_ids=list(range(len(choices))),
)
# get top choice
top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}"
top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason)
# parse output
return SelectorResult(selections=[top_selection])
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
query_embedding = await self._embed_model.aget_query_embedding(query.query_str)
text_embeddings = [
await self._embed_model.aget_text_embedding(choice.description)
for choice in choices
]
top_similarities, top_ids = get_top_k_embeddings(
query_embedding,
text_embeddings,
similarity_top_k=1,
embedding_ids=list(range(len(choices))),
)
# get top choice
top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}"
top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason)
# parse output
return SelectorResult(selections=[top_selection])
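# --- Hedged usage sketch for EmbeddingSingleSelector. The OpenAI embedding import is an
# assumption (any BaseEmbedding works), and the tool names and descriptions are made up.
from llama_index.core.tools.types import ToolMetadata
from llama_index.embeddings.openai import OpenAIEmbedding  # assumed to be installed
selector = EmbeddingSingleSelector.from_defaults(embed_model=OpenAIEmbedding())
choices = [
    ToolMetadata(name="weather", description="Answers questions about current weather."),
    ToolMetadata(name="math", description="Solves arithmetic and algebra problems."),
]
result = selector.select(choices, "What is 17 * 23?")
print(result.selections[0].index, result.selections[0].reason)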
|
from typing import Any, Dict, Optional, Sequence
from llama_index.core.base.base_selector import (
BaseSelector,
SelectorResult,
SingleSelection,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.query.embedding_utils import get_top_k_embeddings
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.schema import QueryBundle
from llama_index.core.settings import Settings
from llama_index.core.tools.types import ToolMetadata
class EmbeddingSingleSelector(BaseSelector):
"""Embedding selector.
Embedding selector that chooses one out of many options.
Args:
embed_model (BaseEmbedding): An embedding model.
"""
def __init__(
self,
embed_model: BaseEmbedding,
) -> None:
self._embed_model = embed_model
@classmethod
def from_defaults(
cls,
embed_model: Optional[BaseEmbedding] = None,
) -> "EmbeddingSingleSelector":
# optionally initialize defaults
embed_model = embed_model or Settings.embed_model
# construct prompt
return cls(embed_model)
def _get_prompts(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
query_embedding = self._embed_model.get_query_embedding(query.query_str)
text_embeddings = [
self._embed_model.get_text_embedding(choice.description)
for choice in choices
]
top_similarities, top_ids = get_top_k_embeddings(
query_embedding,
text_embeddings,
similarity_top_k=1,
embedding_ids=list(range(len(choices))),
)
# get top choice
top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}"
top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason)
# parse output
return SelectorResult(selections=[top_selection])
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
query_embedding = await self._embed_model.aget_query_embedding(query.query_str)
text_embeddings = [
await self._embed_model.aget_text_embedding(choice.description)
for choice in choices
]
top_similarities, top_ids = get_top_k_embeddings(
query_embedding,
text_embeddings,
similarity_top_k=1,
embedding_ids=list(range(len(choices))),
)
# get top choice
top_selection_reason = f"Top similarity match: {top_similarities[0]:.2f}, {choices[top_ids[0]].name}"
top_selection = SingleSelection(index=top_ids[0], reason=top_selection_reason)
# parse output
return SelectorResult(selections=[top_selection])
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
if self.openai_pre_1_0:
condition = results["flagged"]
else:
condition = results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
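# --- Hedged sketch extending the docstring example above: invoking the chain on a
# single string. Requires the `openai` package and OPENAI_API_KEY in the environment.
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
print(moderation.invoke({"input": "I love writing unit tests."})["output"])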
|
"""Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
if self.openai_pre_1_0:
condition = results["flagged"]
else:
condition = results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
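# --- Usage sketch (illustrative only, not part of the module above) ---
# A minimal, hypothetical example of running the chain. It assumes the ``openai``
# package is installed and OPENAI_API_KEY is set in the environment; the input
# text below is made up.
if __name__ == "__main__":
    moderation = OpenAIModerationChain(error=False)
    result = moderation.invoke({"input": "Some text to check."})
    print(result["output"])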
|
from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. Tokens in word_weights need not match the vocab exactly (it can contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super().__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
f"{num_unknown_words} of {len(vocab)} words without a weighting value. Set weight to {unknown_word_weight}"
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
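# --- Usage sketch (illustrative only, not part of the module above) ---
# A minimal, hypothetical example of weighting token embeddings with idf-style
# values. The vocab and idf numbers are made up; in practice the vocab comes from
# the tokenizer and the weights from a corpus statistic. Tokens missing from the
# mapping ("[PAD]", "[UNK]") fall back to unknown_word_weight.
if __name__ == "__main__":
    vocab = ["[PAD]", "[UNK]", "the", "dog", "barks"]
    idf = {"the": 0.1, "dog": 2.3, "barks": 3.1}
    weighting = WordWeights(vocab=vocab, word_weights=idf, unknown_word_weight=1.0)

    # Fake token-level features shaped like the output of a transformer module.
    features = {
        "input_ids": torch.tensor([[2, 3, 4, 0]]),       # "the dog barks [PAD]"
        "attention_mask": torch.tensor([[1, 1, 1, 0]]),
        "token_embeddings": torch.randn(1, 4, 8),         # (batch, tokens, dim)
    }
    weighted = weighting(features)
    print(weighted["token_embeddings"].shape, weighted["token_weights_sum"])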
|
from __future__ import annotations
import json
import logging
import os
import torch
from torch import Tensor, nn
logger = logging.getLogger(__name__)
class WordWeights(nn.Module):
"""This model can weight word embeddings, for example, with idf-values."""
def __init__(self, vocab: list[str], word_weights: dict[str, float], unknown_word_weight: float = 1):
"""
Initializes the WordWeights class.
Args:
vocab (List[str]): Vocabulary of the tokenizer.
            word_weights (Dict[str, float]): Mapping of tokens to a float weight value. Word embeddings are multiplied
                by this float value. Tokens in word_weights need not match the vocab exactly (it can contain more or fewer entries).
unknown_word_weight (float, optional): Weight for words in vocab that do not appear in the word_weights lookup.
These can be, for example, rare words in the vocab where no weight exists. Defaults to 1.
"""
super(WordWeights, self).__init__()
self.config_keys = ["vocab", "word_weights", "unknown_word_weight"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
weights.append(weight)
logger.info(
"{} of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.emb_layer = nn.Embedding(len(vocab), 1)
self.emb_layer.load_state_dict({"weight": torch.FloatTensor(weights).unsqueeze(1)})
def forward(self, features: dict[str, Tensor]):
attention_mask = features["attention_mask"]
token_embeddings = features["token_embeddings"]
# Compute a weight value for each token
token_weights_raw = self.emb_layer(features["input_ids"]).squeeze(-1)
token_weights = token_weights_raw * attention_mask.float()
token_weights_sum = torch.sum(token_weights, 1)
# Multiply embedding by token weight value
token_weights_expanded = token_weights.unsqueeze(-1).expand(token_embeddings.size())
token_embeddings = token_embeddings * token_weights_expanded
features.update({"token_embeddings": token_embeddings, "token_weights_sum": token_weights_sum})
return features
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return WordWeights(**config)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(
pytest.lazy_fixture('docs_with_text'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
import pytest
from jina import Document, DocumentArray
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [[['r'], 10], [['c'], 0], [['cc'], 0]], ['r']),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_bounding_box_format,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
    hflip,  # TODO: Consider moving all pure alias definitions to the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_pure_tensor, register_kernel # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image,
_get_dimensions_image_pil,
get_dimensions_video,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image,
_get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image,
_get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import _erase_image_pil, erase, erase_image, erase_video
from ._color import (
_adjust_brightness_image_pil,
_adjust_contrast_image_pil,
_adjust_gamma_image_pil,
_adjust_hue_image_pil,
_adjust_saturation_image_pil,
_adjust_sharpness_image_pil,
_autocontrast_image_pil,
_equalize_image_pil,
_invert_image_pil,
_permute_channels_image_pil,
_posterize_image_pil,
_rgb_to_grayscale_image_pil,
_solarize_image_pil,
adjust_brightness,
adjust_brightness_image,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image,
adjust_gamma_video,
adjust_hue,
adjust_hue_image,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image,
adjust_sharpness_video,
autocontrast,
autocontrast_image,
autocontrast_video,
equalize,
equalize_image,
equalize_video,
invert,
invert_image,
invert_video,
permute_channels,
permute_channels_image,
permute_channels_video,
posterize,
posterize_image,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image,
solarize,
solarize_image,
solarize_video,
to_grayscale,
)
from ._geometry import (
_affine_image_pil,
_center_crop_image_pil,
_crop_image_pil,
_elastic_image_pil,
_five_crop_image_pil,
_horizontal_flip_image_pil,
_pad_image_pil,
_perspective_image_pil,
_resize_image_pil,
_resized_crop_image_pil,
_rotate_image_pil,
_ten_crop_image_pil,
_vertical_flip_image_pil,
affine,
affine_bounding_boxes,
affine_image,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image,
five_crop_video,
    hflip,  # TODO: Consider moving all pure alias definitions to the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
_gaussian_blur_image_pil,
convert_image_dtype,
gaussian_blur,
gaussian_blur_image,
gaussian_blur_video,
normalize,
normalize_image,
normalize_video,
to_dtype,
to_dtype_image,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
NUM_BYTES_FOR_HEADER_LEN = 8
SHARDED_DIR_NAME = "sharded"
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
# this uses the same logic that's done in HF code base
# https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
# and follows their documentation on how their files are serialized
# https://huggingface.co/docs/safetensors/index#format
header_len_bytes = file_bytes.read(NUM_BYTES_FOR_HEADER_LEN)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + NUM_BYTES_FOR_HEADER_LEN)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
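# --- Usage sketch (illustrative only, not part of the module above) ---
# A self-contained demonstration of _get_safetensors_file_metadata on an in-memory
# buffer laid out like a safetensors file: an 8-byte little-endian header length
# followed by the JSON header. The single tensor entry is fabricated for
# illustration and does not come from a real checkpoint.
if __name__ == "__main__":
    header = {
        "weight": {
            DTYPE_KEY: "F32",
            SHAPE_KEY: [2, 2],
            DATA_OFFSETS_KEY: [0, 16],
        },
        DEFAULT_EXTRA_METADATA_KEY: {FORMAT_KEY: FORMAT_VALUE},
    }
    header_bytes = json.dumps(header).encode("utf-8")
    buffer = io.BytesIO(struct.pack("<Q", len(header_bytes)) + header_bytes)

    metadata, data_start = _get_safetensors_file_metadata(buffer)
    print(metadata["weight"][SHAPE_KEY], _get_dtype(metadata["weight"][DTYPE_KEY]), data_start)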
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
NUM_BYTES_FOR_HEADER_LEN = 8
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
# this uses the same logic that's done in HF code base
# https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
# and follows their documentation on how their files are serialized
# https://huggingface.co/docs/safetensors/index#format
header_len_bytes = file_bytes.read(NUM_BYTES_FOR_HEADER_LEN)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + NUM_BYTES_FOR_HEADER_LEN)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
                f'{cls.__name__} must have one of the following extensions: '
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T, force: Optional[str] = None
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param url: url to load data from
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force)
return mesh
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.typing.url.any_url import AnyUrl
if TYPE_CHECKING:
import trimesh
from pydantic import BaseConfig
from pydantic.fields import ModelField
MESH_FILE_FORMATS = ('obj', 'glb', 'ply')
T = TypeVar('T', bound='Url3D')
class Url3D(AnyUrl, ABC):
"""
URL to a .obj, .glb, or .ply file containing 3D mesh or point cloud information.
    Can be a remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config)
has_mesh_extension = any(url.endswith(ext) for ext in MESH_FILE_FORMATS)
if not has_mesh_extension:
raise ValueError(
                f'{cls.__name__} must have one of the following extensions: '
f'{MESH_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def _load_trimesh_instance(
self: T, force: Optional[str] = None
) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
"""
Load the data from the url into a trimesh.Mesh or trimesh.Scene object.
:param url: url to load data from
:param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
For 'scene' try to coerce everything into a scene.
:return: trimesh.Mesh or trimesh.Scene object
"""
import urllib.parse
import trimesh
scheme = urllib.parse.urlparse(self).scheme
loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load
mesh = loader(self, force=force)
return mesh
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .fileio import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .utils import *
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import gzip
import lzma
import time
import faiss
import numpy as np
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0] / 1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time() - start_time))
return sim, ind
def file_open(filepath):
    # Helper that opens a file based on its extension (gzip, xz, or plain text)
if filepath.endswith(".gz"):
return gzip.open(filepath, "rt", encoding="utf8")
elif filepath.endswith("xz"):
return lzma.open(filepath, "rt", encoding="utf8")
else:
return open(filepath, "r", encoding="utf8")
|
"""
This file contains some utility functions used to find parallel sentences
in two monolingual corpora.
Code in this file has been adapted from the LASER repository:
https://github.com/facebookresearch/LASER
"""
import faiss
import numpy as np
import time
import gzip
import lzma
######## Functions to find and score candidates
def score(x, y, fwd_mean, bwd_mean, margin):
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin):
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin)
return scores
def kNN(x, y, k, use_ann_search=False, ann_num_clusters=32768, ann_num_cluster_probe=3):
start_time = time.time()
if use_ann_search:
print("Perform approx. kNN search")
n_cluster = min(ann_num_clusters, int(y.shape[0]/1000))
quantizer = faiss.IndexFlatIP(y.shape[1])
index = faiss.IndexIVFFlat(quantizer, y.shape[1], n_cluster, faiss.METRIC_INNER_PRODUCT)
index.nprobe = ann_num_cluster_probe
index.train(y)
index.add(y)
sim, ind = index.search(x, k)
else:
print("Perform exact search")
idx = faiss.IndexFlatIP(y.shape[1])
idx.add(y)
sim, ind = idx.search(x, k)
print("Done: {:.2f} sec".format(time.time()-start_time))
return sim, ind
def file_open(filepath):
    # Helper that opens a file based on its extension (gzip, xz, or plain text)
if filepath.endswith('.gz'):
return gzip.open(filepath, 'rt', encoding='utf8')
elif filepath.endswith('xz'):
return lzma.open(filepath, 'rt', encoding='utf8')
else:
return open(filepath, 'r', encoding='utf8')
|
from jina.orchestrate.pods.factory import PodFactory
from tests.helper import _generate_pod_args
def test_pod_instantiate_start_same_context():
arg = _generate_pod_args()
pod_args = [arg, arg]
for args in pod_args:
pod = PodFactory.build_pod(args)
with pod:
pass
def test_pod_instantiate_start_different_context():
arg = _generate_pod_args()
pod_args = [arg, arg]
pods = []
for args in pod_args:
pods.append(PodFactory.build_pod(args))
for pod in pods:
with pod:
pass
|
from jina.parsers import set_pod_parser
from jina.orchestrate.pods.factory import PodFactory
def test_pod_instantiate_start_same_context():
arg = set_pod_parser().parse_args([])
pod_args = [arg, arg]
for args in pod_args:
pod = PodFactory.build_pod(args)
with pod:
pass
def test_pod_instantiate_start_different_context():
arg = set_pod_parser().parse_args([])
pod_args = [arg, arg]
pods = []
for args in pod_args:
pods.append(PodFactory.build_pod(args))
for pod in pods:
with pod:
pass
|
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .CosineSimilarityLoss import CosineSimilarityLoss
from .SoftmaxLoss import SoftmaxLoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
from .MarginMSELoss import MarginMSELoss
from .MatryoshkaLoss import MatryoshkaLoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MSELoss import MSELoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import SiameseDistanceMetric, ContrastiveLoss
from .ContrastiveTensionLoss import (
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
ContrastiveTensionDataLoader,
)
from .CoSENTLoss import CoSENTLoss
from .AnglELoss import AnglELoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
# Triplet losses
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .BatchAllTripletLoss import BatchAllTripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'NumClassCheckHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
@MODELS.register_module()
class ChannelMapper(BaseModule):
"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
convolution layer. Default: None.
norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
normalization layer. Default: None.
act_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
activation layer in ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There will
            be extra_convs when num_outs is larger than the length of in_channels.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or dict],
optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(
self,
in_channels: List[int],
out_channels: int,
kernel_size: int = 3,
conv_cfg: OptConfigType = None,
norm_cfg: OptConfigType = None,
act_cfg: OptConfigType = dict(type='ReLU'),
num_outs: int = None,
init_cfg: OptMultiConfig = dict(
type='Xavier', layer='Conv2d', distribution='uniform')
) -> None:
super().__init__(init_cfg=init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs: Tuple[Tensor]) -> Tuple[Tensor]:
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmengine.model import BaseModule
from mmdet.registry import MODELS
@MODELS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
act_cfg (dict, optional): Config dict for activation layer in
ConvModule. Default: dict(type='ReLU').
        num_outs (int, optional): Number of output feature maps. There
            will be extra_convs when num_outs is larger than the length
            of in_channels.
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
num_outs=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(ChannelMapper, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"SiameseDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (features.Image, features.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, features.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Image.wrap_like(inpt, output)
elif isinstance(inpt, features.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Video.wrap_like(inpt, output)
elif isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
else:
raise TypeError(
f"Input can either be a plain tensor, an `Image` or `Video` tensor subclass, or a PIL image, "
f"but got {type(inpt)} instead."
)
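# --- Usage sketch (illustrative only, not part of the module above) ---
# A minimal demonstration of the erase() dispatcher on a plain tensor and a PIL
# image. The sizes and the zero fill value are arbitrary; the point is that the
# region [..., i:i+h, j:j+w] is overwritten with ``v``.
if __name__ == "__main__":
    img = torch.rand(3, 32, 32)
    out = erase(img, i=4, j=4, h=8, w=8, v=torch.zeros(3, 8, 8))
    print(out[..., 4:12, 4:12].abs().sum())  # tensor(0.): the patch was erased

    pil_img = to_pil_image(img)
    pil_out = erase(pil_img, i=4, j=4, h=8, w=8, v=torch.zeros(3, 8, 8, dtype=torch.uint8))
    print(pil_out.size)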
|
from typing import Union
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms.functional import pil_to_tensor, to_pil_image
def erase_image_tensor(
image: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
if not inplace:
image = image.clone()
image[..., i : i + h, j : j + w] = v
return image
@torch.jit.unused
def erase_image_pil(
image: PIL.Image.Image, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> PIL.Image.Image:
t_img = pil_to_tensor(image)
output = erase_image_tensor(t_img, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return to_pil_image(output, mode=image.mode)
def erase_video(
video: torch.Tensor, i: int, j: int, h: int, w: int, v: torch.Tensor, inplace: bool = False
) -> torch.Tensor:
return erase_image_tensor(video, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
def erase(
inpt: Union[features.ImageTypeJIT, features.VideoTypeJIT],
i: int,
j: int,
h: int,
w: int,
v: torch.Tensor,
inplace: bool = False,
) -> Union[features.ImageTypeJIT, features.VideoTypeJIT]:
if isinstance(inpt, torch.Tensor) and (
torch.jit.is_scripting() or not isinstance(inpt, (features.Image, features.Video))
):
return erase_image_tensor(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
elif isinstance(inpt, features.Image):
output = erase_image_tensor(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Image.wrap_like(inpt, output)
elif isinstance(inpt, features.Video):
output = erase_video(inpt.as_subclass(torch.Tensor), i=i, j=j, h=h, w=w, v=v, inplace=inplace)
return features.Video.wrap_like(inpt, output)
else: # isinstance(inpt, PIL.Image.Image):
return erase_image_pil(inpt, i=i, j=j, h=h, w=w, v=v, inplace=inplace)
|
from docarray.documents.audio import Audio
from docarray.documents.image import Image
from docarray.documents.mesh import Mesh3D
from docarray.documents.point_cloud import PointCloud3D
from docarray.documents.text import Text
from docarray.documents.video import Video
__all__ = ['Text', 'Image', 'Audio', 'Mesh3D', 'PointCloud3D', 'Video']
|
from docarray.documents.audio import Audio
from docarray.documents.image import Image
from docarray.documents.mesh import Mesh3D
from docarray.documents.point_cloud import PointCloud3D
from docarray.documents.text import Text
__all__ = ['Text', 'Image', 'Audio', 'Mesh3D', 'PointCloud3D']
|
from os.path import join
from pathlib import Path
from typing import Any, Callable, Optional, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: Union[str, Path],
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Optional[Callable[[Union[str, Path]], Any]] = None,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: list[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: list[tuple[str, int]] = sum(self._character_images, [])
self.loader = loader
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L") if self.loader is None else self.loader(image_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
from os.path import join
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .utils import check_integrity, download_and_extract_archive, list_dir, list_files
from .vision import VisionDataset
class Omniglot(VisionDataset):
"""`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of dataset where directory
``omniglot-py`` exists.
background (bool, optional): If True, creates dataset from the "background" set, otherwise
creates from the "evaluation" set. This terminology is defined by the authors.
transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts them in the root directory. If the zip files are already downloaded, they are not
            downloaded again.
loader (callable, optional): A function to load an image given its path.
By default, it uses PIL as its image loader, but users could also pass in
``torchvision.io.decode_image`` for decoding image data into tensors directly.
"""
folder = "omniglot-py"
download_url_prefix = "https://raw.githubusercontent.com/brendenlake/omniglot/master/python"
zips_md5 = {
"images_background": "68d2efa1b9178cc56df9314c21c6e718",
"images_evaluation": "6b91aef0f799c5bb55b94e3f2daec811",
}
def __init__(
self,
root: Union[str, Path],
background: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
loader: Optional[Callable[[Union[str, Path]], Any]] = None,
) -> None:
super().__init__(join(root, self.folder), transform=transform, target_transform=target_transform)
self.background = background
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
self.target_folder = join(self.root, self._get_target_folder())
self._alphabets = list_dir(self.target_folder)
self._characters: List[str] = sum(
([join(a, c) for c in list_dir(join(self.target_folder, a))] for a in self._alphabets), []
)
self._character_images = [
[(image, idx) for image in list_files(join(self.target_folder, character), ".png")]
for idx, character in enumerate(self._characters)
]
self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, [])
self.loader = loader
def __len__(self) -> int:
return len(self._flat_character_images)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
image_name, character_class = self._flat_character_images[index]
image_path = join(self.target_folder, self._characters[character_class], image_name)
image = Image.open(image_path, mode="r").convert("L") if self.loader is None else self.loader(image_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
character_class = self.target_transform(character_class)
return image, character_class
def _check_integrity(self) -> bool:
zip_filename = self._get_target_folder()
if not check_integrity(join(self.root, zip_filename + ".zip"), self.zips_md5[zip_filename]):
return False
return True
def download(self) -> None:
if self._check_integrity():
return
filename = self._get_target_folder()
zip_filename = filename + ".zip"
url = self.download_url_prefix + "/" + zip_filename
download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename])
def _get_target_folder(self) -> str:
return "images_background" if self.background else "images_evaluation"
|
_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
_delete_=True,
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
add_extra_convs=True,
start_level=1,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
train_cfg = dict(val_interval=2)
|
_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
neck=dict(
_delete_=True,
type='FPG',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
inter_channels=256,
num_outs=5,
add_extra_convs=True,
start_level=1,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()]))
evaluation = dict(interval=2)
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
import sentence_transformers
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it does not matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print(f"{src_lang}-{trg_lang} has {len(source_sentences)} sentences")
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, f"Tatoeba-{src_lang}-{trg_lang}-dev.tsv.gz"),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, f"Tatoeba-{src_lang}-{trg_lang}-train.tsv.gz"),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translations, mainly aimed at language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import gzip
import os
import tarfile
import sentence_transformers
# Note: Tatoeba uses 3-letter language codes (ISO-639-2),
# while other datasets like OPUS use 2-letter language codes (ISO-639-1).
# For training sentence transformers, it does not matter which type of language code is used.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(["eng"])
target_languages = set(["deu", "ara", "tur", "spa", "ita", "fra"])
num_dev_sentences = 1000 # Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, "sentences.tar.bz2")
sentences_file = os.path.join(tatoeba_folder, "sentences.csv")
links_file_bz2 = os.path.join(tatoeba_folder, "links.tar.bz2")
links_file = os.path.join(tatoeba_folder, "links.csv")
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
# Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url + os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
# Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract("sentences.csv", path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract("links.csv", path=tatoeba_folder)
tar.close()
# Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding="utf8") as fIn:
for line in fIn:
id, lang, sentence = line.strip().split("\t")
if lang in all_langs:
sentences[id] = (lang, sentence)
# Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding="utf8") as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
# Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-dev.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(
os.path.join(output_folder, "Tatoeba-{}-{}-train.tsv.gz".format(src_lang, trg_lang)),
"wt",
encoding="utf8",
) as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent] + translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from packaging.version import parse
try:
import tf_keras as keras
except (ModuleNotFoundError, ImportError):
import keras
if parse(keras.__version__).major > 2:
raise ValueError(
"Your currently installed version of Keras is Keras 3, but this is not yet supported in "
"Transformers. Please install the backwards-compatible tf-keras package with "
"`pip install tf-keras`."
)
def _gelu(x):
"""
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
https://huggingface.co/papers/1606.08415
"""
x = tf.convert_to_tensor(x)
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
return x * cdf
def _gelu_new(x):
"""
    Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://huggingface.co/papers/1606.08415
Args:
x: float Tensor to perform activation
Returns:
`x` with the GELU activation applied.
"""
x = tf.convert_to_tensor(x)
pi = tf.cast(math.pi, x.dtype)
coeff = tf.cast(0.044715, x.dtype)
cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
return x * cdf
def mish(x):
x = tf.convert_to_tensor(x)
return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
x = tf.convert_to_tensor(x)
coeff1 = tf.cast(0.044715, x.dtype)
coeff2 = tf.cast(0.7978845608, x.dtype)
return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
x = tf.convert_to_tensor(x)
coeff = tf.cast(1.702, x.dtype)
return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
"""
    Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purposes, as
    it allows mapping 2 negative values in the GeLU spectrum. For more information on this trick, please refer to
https://huggingface.co/papers/2004.09602
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
https://huggingface.co/papers/1606.08415 :param x: :return:
"""
return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
"""
Gated Linear Unit. Implementation as defined in the original paper (see https://huggingface.co/papers/1612.08083), where
the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).
Args:
`x`: float Tensor to perform activation
        `axis`: dimension across which `x` is split in half
Returns:
`x` with the GLU activation applied (with its size halved across the dimension `axis`).
"""
a, b = tf.split(x, 2, axis=axis)
return a * tf.math.sigmoid(b)
if parse(tf.version.VERSION) >= parse("2.4"):
def approximate_gelu_wrap(x):
return keras.activations.gelu(x, approximate=True)
gelu = keras.activations.gelu
gelu_new = approximate_gelu_wrap
else:
gelu = _gelu
gelu_new = _gelu_new
ACT2FN = {
"gelu": gelu,
"gelu_10": gelu_10,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": keras.activations.relu,
"sigmoid": keras.activations.sigmoid,
"silu": keras.activations.swish,
"swish": keras.activations.swish,
"tanh": keras.activations.tanh,
}
def get_tf_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from packaging.version import parse
try:
import tf_keras as keras
except (ModuleNotFoundError, ImportError):
import keras
if parse(keras.__version__).major > 2:
raise ValueError(
"Your currently installed version of Keras is Keras 3, but this is not yet supported in "
"Transformers. Please install the backwards-compatible tf-keras package with "
"`pip install tf-keras`."
)
def _gelu(x):
"""
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
https://arxiv.org/abs/1606.08415
"""
x = tf.convert_to_tensor(x)
cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
return x * cdf
def _gelu_new(x):
"""
    Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation
Returns:
`x` with the GELU activation applied.
"""
x = tf.convert_to_tensor(x)
pi = tf.cast(math.pi, x.dtype)
coeff = tf.cast(0.044715, x.dtype)
cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
return x * cdf
def mish(x):
x = tf.convert_to_tensor(x)
return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
x = tf.convert_to_tensor(x)
coeff1 = tf.cast(0.044715, x.dtype)
coeff2 = tf.cast(0.7978845608, x.dtype)
return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
x = tf.convert_to_tensor(x)
coeff = tf.cast(1.702, x.dtype)
return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
"""
    Clip the range of possible GeLU outputs between [-10, 10]. This is especially useful for quantization purposes, as
    it allows mapping 2 negative values in the GeLU spectrum. For more information on this trick, please refer to
https://arxiv.org/abs/2004.09602
Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
https://arxiv.org/abs/1606.08415 :param x: :return:
"""
return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
"""
Gated Linear Unit. Implementation as defined in the original paper (see https://arxiv.org/abs/1612.08083), where
the input `x` is split in two halves across a dimension (`axis`), A and B, returning A * sigmoid(B).
Args:
`x`: float Tensor to perform activation
        `axis`: dimension across which `x` is split in half
Returns:
`x` with the GLU activation applied (with its size halved across the dimension `axis`).
"""
a, b = tf.split(x, 2, axis=axis)
return a * tf.math.sigmoid(b)
if parse(tf.version.VERSION) >= parse("2.4"):
def approximate_gelu_wrap(x):
return keras.activations.gelu(x, approximate=True)
gelu = keras.activations.gelu
gelu_new = approximate_gelu_wrap
else:
gelu = _gelu
gelu_new = _gelu_new
ACT2FN = {
"gelu": gelu,
"gelu_10": gelu_10,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": keras.activations.relu,
"sigmoid": keras.activations.sigmoid,
"silu": keras.activations.swish,
"swish": keras.activations.swish,
"tanh": keras.activations.tanh,
}
def get_tf_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class CNN(Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
config_keys: list[str] = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
config_file_name: str = "cnn_config.json"
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
stride_sizes: list[int] = None,
):
nn.Module.__init__(self)
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
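# Hedged usage sketch (dimensions are illustrative); with the defaults above
# (out_channels=256, kernel_sizes=[1, 3, 5]) the output embedding size is 256 * 3 = 768.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=300)
    features_in = {"token_embeddings": torch.rand(2, 16, 300)}
    out = cnn(features_in)["token_embeddings"]
    print(out.shape)  # torch.Size([2, 16, 768])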
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: list[int] = [1, 3, 5],
stride_sizes: list[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json")) as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
preprocess_cfg=None,
pretrained=None,
init_cfg=None):
super(RetinaNet,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
preprocess_cfg, pretrained, init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
img_norm_cfg=None):
super(RetinaNet,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained, init_cfg, img_norm_cfg)
|
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import logging
import os
import sys
import tarfile
from easynmt import EasyNMT
from sentence_transformers import LoggingHandler, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, f"train_queries.en-{target_lang}.tsv")
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print(f"Start translation of {len(queries)} queries.")
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
"""
This script translates the queries in the MS MARCO dataset to the defined target languages.
For machine translation, we use EasyNMT: https://github.com/UKPLab/EasyNMT
You can install it via: pip install easynmt
Usage:
python translate_queries [target_language]
"""
import logging
import os
import sys
import tarfile
from easynmt import EasyNMT
from sentence_transformers import LoggingHandler, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
target_lang = sys.argv[1]
output_folder = "multilingual-data"
data_folder = "../msmarco-data"
output_filename = os.path.join(output_folder, "train_queries.en-{}.tsv".format(target_lang))
os.makedirs(output_folder, exist_ok=True)
## Does the output file exist? If yes, read it so we can continue the translation
translated_qids = set()
if os.path.exists(output_filename):
with open(output_filename, "r", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
translated_qids.add(splits[0])
### Now we read the MS Marco dataset
os.makedirs(data_folder, exist_ok=True)
# Read qrels file for relevant positives per query
train_queries = {}
qrels_train = os.path.join(data_folder, "qrels.train.tsv")
if not os.path.exists(qrels_train):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.train.tsv", qrels_train)
with open(qrels_train) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split()
if qid not in translated_qids:
train_queries[qid] = None
# Read all queries
queries_filepath = os.path.join(data_folder, "queries.train.tsv")
if not os.path.exists(queries_filepath):
tar_filepath = os.path.join(data_folder, "queries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download queries.tar.gz")
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/queries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
with open(queries_filepath, "r", encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
if qid in train_queries:
train_queries[qid] = query.strip()
qids = [qid for qid in train_queries if train_queries[qid] is not None]
queries = [train_queries[qid] for qid in qids]
# Define our translation model
translation_model = EasyNMT("opus-mt")
print("Start translation of {} queries.".format(len(queries)))
print("This can take a while. But you can stop this script at any point")
with open(output_filename, "a" if os.path.exists(output_filename) else "w", encoding="utf8") as fOut:
for qid, query, translated_query in zip(
qids,
queries,
translation_model.translate_stream(
queries,
source_lang="en",
target_lang=target_lang,
beam_size=2,
perform_sentence_splitting=False,
chunk_size=256,
batch_size=64,
),
):
fOut.write("{}\t{}\n".format(qid, translated_query.replace("\t", " ")))
fOut.flush()
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
return super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder):
return super().compute_metrices(model)
def compute_metrices_batched(self, model: SparseEncoder):
return super().compute_metrices_batched(model)
def compute_metrices_individual(self, model: SparseEncoder):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
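# Hedged usage sketch (the sample layout follows the parent RerankingEvaluator convention:
# each sample holds a query plus lists of positive and negative passages; `model` would be
# a SparseEncoder instance):
#
#   samples = [
#       {"query": "capital of france",
#        "positive": ["Paris is the capital of France."],
#        "negative": ["Berlin is the capital of Germany."]},
#   ]
#   evaluator = SparseRerankingEvaluator(samples, at_k=10, name="toy-rerank")
#   results = evaluator(model)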
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Callable
from sentence_transformers.evaluation import RerankingEvaluator
from sentence_transformers.util import cos_sim
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseRerankingEvaluator(RerankingEvaluator):
def __init__(
self,
samples: list[dict[str, str | list[str]]],
at_k: int = 10,
name: str = "",
write_csv: bool = True,
similarity_fct: Callable[[Tensor, Tensor], Tensor] = cos_sim,
batch_size: int = 64,
show_progress_bar: bool = False,
use_batched_encoding: bool = True,
truncate_dim: int | None = None,
mrr_at_k: int | None = None,
):
super().__init__(
samples=samples,
at_k=at_k,
name=name,
write_csv=write_csv,
similarity_fct=similarity_fct,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
use_batched_encoding=use_batched_encoding,
truncate_dim=truncate_dim,
mrr_at_k=mrr_at_k,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def compute_metrices(
self,
model: SparseEncoder,
):
return super().compute_metrices(model)
def compute_metrices_batched(
self,
model: SparseEncoder,
):
return super().compute_metrices_batched(model)
def compute_metrices_individual(
self,
model: SparseEncoder,
):
return super().compute_metrices_individual(model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
show_progress_bar: bool | None = None,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=show_progress_bar,
convert_to_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
import os
from typing import Tuple
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.basic_models import run_custom_objective
class TestGPUBasicModels:
def run_cls(self, X: np.ndarray, y: np.ndarray) -> Tuple[int, int]:
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-0.json")
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-1.json")
with open("test_deterministic_gpu_hist-0.json", "r") as fd:
model_0 = fd.read()
with open("test_deterministic_gpu_hist-1.json", "r") as fd:
model_1 = fd.read()
os.remove("test_deterministic_gpu_hist-0.json")
os.remove("test_deterministic_gpu_hist-1.json")
return hash(model_0), hash(model_1)
def test_custom_objective(self) -> None:
dtrain, dtest = tm.load_agaricus(__file__)
run_custom_objective("hist", "cuda", dtrain, dtest)
def test_deterministic_gpu_hist(self) -> None:
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows)
model_0, model_1 = self.run_cls(X, y)
assert model_0 == model_1
@pytest.mark.skipif(**tm.no_sklearn())
def test_invalid_gpu_id(self) -> None:
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
# should pass with invalid gpu id
cls1 = xgb.XGBClassifier(tree_method="hist", device="cuda:9999")
cls1.fit(X, y)
# should throw error with fail_on_invalid_gpu_id enabled
cls2 = xgb.XGBClassifier(
tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
cls2 = xgb.XGBClassifier(
tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
sys.path.append("tests/python")
import test_basic_models as test_bm
# Don't import the test class, otherwise they will run twice.
import test_callback as test_cb # noqa
rng = np.random.RandomState(1994)
class TestGPUBasicModels:
cpu_test_cb = test_cb.TestCallbacks()
cpu_test_bm = test_bm.TestModels()
def run_cls(self, X, y):
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-0.json")
cls = xgb.XGBClassifier(tree_method="hist", device="cuda")
cls.fit(X, y)
cls.get_booster().save_model("test_deterministic_gpu_hist-1.json")
with open("test_deterministic_gpu_hist-0.json", "r") as fd:
model_0 = fd.read()
with open("test_deterministic_gpu_hist-1.json", "r") as fd:
model_1 = fd.read()
os.remove("test_deterministic_gpu_hist-0.json")
os.remove("test_deterministic_gpu_hist-1.json")
return hash(model_0), hash(model_1)
def test_custom_objective(self):
self.cpu_test_bm.run_custom_objective("gpu_hist")
def test_eta_decay(self):
self.cpu_test_cb.run_eta_decay("gpu_hist")
@pytest.mark.parametrize(
"objective", ["binary:logistic", "reg:absoluteerror", "reg:quantileerror"]
)
def test_eta_decay_leaf_output(self, objective) -> None:
self.cpu_test_cb.run_eta_decay_leaf_output("gpu_hist", objective)
def test_deterministic_gpu_hist(self):
kRows = 1000
kCols = 64
kClasses = 4
# Create large values to force rounding.
X = np.random.randn(kRows, kCols) * 1e4
y = np.random.randint(0, kClasses, size=kRows)
model_0, model_1 = self.run_cls(X, y)
assert model_0 == model_1
@pytest.mark.skipif(**tm.no_sklearn())
def test_invalid_gpu_id(self):
from sklearn.datasets import load_digits
X, y = load_digits(return_X_y=True)
# should pass with invalid gpu id
cls1 = xgb.XGBClassifier(tree_method="gpu_hist", gpu_id=9999)
cls1.fit(X, y)
# should throw error with fail_on_invalid_gpu_id enabled
cls2 = xgb.XGBClassifier(
tree_method="gpu_hist", gpu_id=9999, fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
cls2 = xgb.XGBClassifier(
tree_method="hist", device="cuda:9999", fail_on_invalid_gpu_id=True
)
with pytest.raises(ValueError, match="ordinal 9999 is invalid"):
cls2.fit(X, y)
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, e.g. from "CXCYWH" to "XYXY".
.. betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
"""[BETA] Convert input image or video to the given ``dtype`` and scale the values accordingly.
.. betastatus:: ConvertDtype transform
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
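# Hedged usage sketch (tensor values are illustrative): converting uint8 images to
# float32 rescales values from [0, 255] into [0.0, 1.0]; the block only runs when this
# module is executed directly.
if __name__ == "__main__":
    img = datapoints.Image(torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8))
    out = ConvertDtype(torch.float32)(img)
    print(out.dtype, float(out.min()), float(out.max()))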
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
"""[BETA] Convert a tensor image/box/mask to the given ``dtype`` and scale the values accordingly
.. betastatus:: ConvertDtype transform
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints._TensorImageType, datapoints._TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBox(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(inpt, np.ndarray):
output = torch.from_numpy(inpt).permute((2, 0, 1)).contiguous()
elif isinstance(inpt, PIL.Image.Image):
output = pil_to_tensor(inpt)
elif isinstance(inpt, torch.Tensor):
output = inpt
else:
raise TypeError(f"Input can either be a numpy array or a PIL image, but got {type(inpt)} instead.")
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
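# Hedged usage sketch: NumPy arrays are assumed to be HWC and are permuted to CHW,
# while tensors are wrapped as-is; the block only runs when this module is executed directly.
if __name__ == "__main__":
    arr = np.zeros((32, 32, 3), dtype=np.uint8)
    img = to_image_tensor(arr)
    print(type(img).__name__, tuple(img.shape))  # Image (3, 32, 32)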
|
from typing import Union
import numpy as np
import PIL.Image
import torch
from torchvision.prototype import features
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarray):
output = torch.from_numpy(image).permute((2, 0, 1)).contiguous()
elif isinstance(image, PIL.Image.Image):
output = pil_to_tensor(image)
    else: # isinstance(image, torch.Tensor):
output = image
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is list[list[Generation]].
When returned from a chat model the type is list[list[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
    This is a free-form dictionary that can contain any information the
    provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack list[list[Generation]] -> list[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
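# Hedged usage sketch (texts are illustrative): flatten() keeps llm_output (and hence any
# token usage) only on the result built from the first prompt's generations.
if __name__ == "__main__":
    result = LLMResult(
        generations=[[Generation(text="a")], [Generation(text="b")]],
        llm_output={"token_usage": {"total_tokens": 7}},
    )
    first, second = result.flatten()
    print(first.llm_output)   # {'token_usage': {'total_tokens': 7}}
    print(second.llm_output)  # {'token_usage': {}}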
|
"""LLMResult class."""
from __future__ import annotations
from copy import deepcopy
from typing import Literal, Optional, Union
from pydantic import BaseModel
from langchain_core.outputs.chat_generation import ChatGeneration, ChatGenerationChunk
from langchain_core.outputs.generation import Generation, GenerationChunk
from langchain_core.outputs.run_info import RunInfo
class LLMResult(BaseModel):
"""A container for results of an LLM call.
Both chat models and LLMs generate an LLMResult object. This object contains
the generated outputs and any additional information that the model provider
wants to return.
"""
generations: list[
list[Union[Generation, ChatGeneration, GenerationChunk, ChatGenerationChunk]]
]
"""Generated outputs.
The first dimension of the list represents completions for different input
prompts.
The second dimension of the list represents different candidate generations
for a given prompt.
When returned from an LLM the type is List[List[Generation]].
When returned from a chat model the type is List[List[ChatGeneration]].
ChatGeneration is a subclass of Generation that has a field for a structured
chat message.
"""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output.
    This is a free-form dictionary that can contain any information the
    provider wants to return. It is not standardized and is provider-specific.
Users should generally avoid relying on this field and instead rely on
accessing relevant information from standardized fields present in
AIMessage.
"""
run: Optional[list[RunInfo]] = None
"""List of metadata info for model call for each input."""
type: Literal["LLMResult"] = "LLMResult" # type: ignore[assignment]
"""Type is used exclusively for serialization purposes."""
def flatten(self) -> list[LLMResult]:
"""Flatten generations into a single list.
Unpack List[List[Generation]] -> List[LLMResult] where each returned LLMResult
contains only a single Generation. If token usage information is available,
it is kept only for the LLMResult corresponding to the top-choice
Generation, to avoid over-counting of token usage downstream.
Returns:
List of LLMResults where each returned LLMResult contains a single
Generation.
"""
llm_results = []
for i, gen_list in enumerate(self.generations):
# Avoid double counting tokens in OpenAICallback
if i == 0:
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=self.llm_output,
)
)
else:
if self.llm_output is not None:
llm_output = deepcopy(self.llm_output)
llm_output["token_usage"] = {}
else:
llm_output = None
llm_results.append(
LLMResult(
generations=[gen_list],
llm_output=llm_output,
)
)
return llm_results
def __eq__(self, other: object) -> bool:
"""Check for LLMResult equality by ignoring any metadata related to runs."""
if not isinstance(other, LLMResult):
return NotImplemented
return (
self.generations == other.generations
and self.llm_output == other.llm_output
)
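# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the original module):
# `flatten` splits one multi-prompt result into per-prompt results, keeping
# token usage only on the first one to avoid double counting downstream.
if __name__ == "__main__":
    _result = LLMResult(
        generations=[[Generation(text="a")], [Generation(text="b")]],
        llm_output={"token_usage": {"total_tokens": 7}},
    )
    _flat = _result.flatten()
    assert len(_flat) == 2
    assert _flat[0].llm_output == {"token_usage": {"total_tokens": 7}}
    assert _flat[1].llm_output == {"token_usage": {}}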
|
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
    Raised when trying to use the dataset viewer HTTP API to access:
    - a missing dataset,
    - a private/gated dataset without authentication, or
    - unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, revision: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
|
from typing import Any, Dict, List, Optional, Union
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
http_get,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
    Raised when trying to use the dataset viewer HTTP API to access:
    - a missing dataset,
    - a private/gated dataset without authentication, or
    - unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = http_get(
url=dataset_viewer_parquet_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, revision: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = http_get(
url=dataset_viewer_info_url + dataset,
temp_file=None,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
max_retries=3,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
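# Usage sketch (editorial addition): both helpers hit the dataset viewer API
# over the network. The dataset name below is only an example; the call raises
# DatasetViewerError if no completed parquet export exists for it.
if __name__ == "__main__":
    parquet_files = get_exported_parquet_files("squad", revision=None, token=None)
    print(f"{len(parquet_files)} exported parquet file(s)")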
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=img_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=img_size),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_size,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=img_size),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
type='SGD',
lr=0.04,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)
# NOTE: This variable is for automatically scaling LR,
# USER SHOULD NOT CHANGE THIS VALUE.
default_batch_size = 32 # (8 GPUs) x (4 samples per GPU)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.01`.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. 0 is vanilla
gradient descent. Defaults to `0.0`.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="SGD",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
raise ValueError("`momentum` must be a float between [0, 1].")
self.momentum = momentum
self.nesterov = nesterov
def build(self, variables):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
            variables: list of model variables to build SGD variables on.
"""
if self.built:
return
super().build(variables)
self.momentums = []
if self.momentum != 0:
self.momentums = self.add_optimizer_variables(variables, "momentum")
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = None
if self.momentum != 0:
m = self.momentums[self._get_variable_index(variable)]
if m is not None:
momentum = ops.cast(self.momentum, variable.dtype)
self.assign(
m,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
if self.nesterov:
self.assign_add(
variable,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
else:
self.assign_add(variable, m)
else:
self.assign_sub(variable, ops.multiply(gradient, learning_rate))
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export("keras.optimizers.SGD")
class SGD(optimizer.Optimizer):
"""Gradient descent (with momentum) optimizer.
Update rule for parameter `w` with gradient `g` when `momentum` is 0:
```python
w = w - learning_rate * g
```
Update rule when `momentum` is larger than 0:
```python
velocity = momentum * velocity - learning_rate * g
w = w + velocity
```
When `nesterov=True`, this rule becomes:
```python
velocity = momentum * velocity - learning_rate * g
w = w + momentum * velocity - learning_rate * g
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.01`.
momentum: float hyperparameter >= 0 that accelerates gradient descent in
the relevant direction and dampens oscillations. 0 is vanilla
gradient descent. Defaults to `0.0`.
nesterov: boolean. Whether to apply Nesterov momentum.
Defaults to `False`.
{{base_optimizer_keyword_args}}
"""
def __init__(
self,
learning_rate=0.01,
momentum=0.0,
nesterov=False,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="SGD",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
if not isinstance(momentum, float) or momentum < 0 or momentum > 1:
raise ValueError("`momentum` must be a float between [0, 1].")
self.momentum = momentum
self.nesterov = nesterov
def build(self, variables):
"""Initialize optimizer variables.
SGD optimizer has one variable `momentums`, only set if `self.momentum`
is not 0.
Args:
            variables: list of model variables to build SGD variables on.
"""
if self.built:
return
super().build(variables)
self.momentums = []
if self.momentum != 0:
for variable in variables:
self.momentums.append(
self.add_variable_from_reference(
reference_variable=variable, name="momentum"
)
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
learning_rate = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
m = None
if self.momentum != 0:
m = self.momentums[self._get_variable_index(variable)]
if m is not None:
momentum = ops.cast(self.momentum, variable.dtype)
self.assign(
m,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
if self.nesterov:
self.assign_add(
variable,
ops.subtract(
ops.multiply(m, momentum),
ops.multiply(gradient, learning_rate),
),
)
else:
self.assign_add(variable, m)
else:
self.assign_sub(variable, ops.multiply(gradient, learning_rate))
def get_config(self):
config = super().get_config()
config.update(
{
"momentum": self.momentum,
"nesterov": self.nesterov,
}
)
return config
SGD.__doc__ = SGD.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
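# Worked example (editorial addition) of the momentum update rule documented
# above, using plain Python numbers rather than the optimizer itself:
# velocity = momentum * velocity - learning_rate * g, then w = w + velocity.
if __name__ == "__main__":
    w, g = 1.0, 2.0
    learning_rate, momentum_value, velocity = 0.1, 0.9, 0.0
    velocity = momentum_value * velocity - learning_rate * g  # -> -0.2
    w = w + velocity  # -> 0.8
    assert abs(w - 0.8) < 1e-9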
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
_delete_=True,
type='SABLRetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
norm_cfg=norm_cfg,
bbox_coder=dict(
type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
import torch
from torch import Tensor
class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
    This works by padding the images to the same size
    and storing the original size of each image in a field.
    Args:
        tensors (tensor): Tensor containing images.
        image_sizes (list[tuple[int, int]]): List of tuples, each containing the size of an image.
"""
def __init__(self, tensors: Tensor, image_sizes: list[tuple[int, int]]) -> None:
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, device: torch.device) -> "ImageList":
cast_tensor = self.tensors.to(device)
return ImageList(cast_tensor, self.image_sizes)
|
from typing import List, Tuple
import torch
from torch import Tensor
class ImageList:
"""
Structure that holds a list of images (of possibly
varying sizes) as a single tensor.
    This works by padding the images to the same size
    and storing the original size of each image in a field.
    Args:
        tensors (tensor): Tensor containing images.
        image_sizes (list[tuple[int, int]]): List of tuples, each containing the size of an image.
"""
def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]) -> None:
self.tensors = tensors
self.image_sizes = image_sizes
def to(self, device: torch.device) -> "ImageList":
cast_tensor = self.tensors.to(device)
return ImageList(cast_tensor, self.image_sizes)
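# Usage sketch (editorial addition): two images padded to a shared 3x32x32
# canvas, with their original (height, width) sizes kept alongside the tensor.
if __name__ == "__main__":
    batch = torch.zeros(2, 3, 32, 32)
    image_list = ImageList(batch, [(30, 28), (32, 32)])
    assert image_list.tensors.shape == (2, 3, 32, 32)
    assert image_list.image_sizes[0] == (30, 28)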
|
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType = Optional[Union[bool, float, str, list, dict]]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
_MetadataType = Dict[str, _StructValueType]
def _convert_ndarray_to_list(v: 'ArrayType'):
if v is not None:
return to_list(v)
class _NamedScore(BaseModel):
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
class _MetadataModel(BaseModel):
metadata: _MetadataType
class PydanticDocument(BaseModel):
id: Optional[str]
parent_id: Optional[str]
granularity: Optional[int]
adjacency: Optional[int]
blob: Optional[str]
tensor: Optional[Any]
mime_type: Optional[str]
text: Optional[str]
weight: Optional[float]
uri: Optional[str]
tags: Optional[Dict[str, '_StructValueType']]
_metadata: Optional[Dict[str, '_StructValueType']]
offset: Optional[float]
location: Optional[List[float]]
embedding: Optional[Any]
modality: Optional[str]
evaluations: Optional[Dict[str, '_NamedScore']]
scores: Optional[Dict[str, '_NamedScore']]
chunks: Optional[List['PydanticDocument']]
matches: Optional[List['PydanticDocument']]
_tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list)
_embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list)
class Config:
smart_union = True
def __init__(self, **data):
super().__init__(**data)
# underscore attributes need to be set and validated manually
_metadata = data.get('_metadata', None)
if _metadata is not None:
_md_model = _MetadataModel(metadata=_metadata) # validate _metadata
object.__setattr__(self, '_metadata', _md_model.metadata)
PydanticDocument.update_forward_refs()
PydanticDocumentArray = List[PydanticDocument]
|
from typing import Optional, List, Dict, Any, TYPE_CHECKING, Union
from pydantic import BaseModel, validator
from docarray.math.ndarray import to_list
if TYPE_CHECKING:
from docarray.typing import ArrayType
# this order must be preserved: https://pydantic-docs.helpmanual.io/usage/types/#unions
_ProtoValueType = Optional[Union[bool, float, str, list, dict]]
_StructValueType = Union[
_ProtoValueType, List[_ProtoValueType], Dict[str, _ProtoValueType]
]
_MetadataType = Dict[str, _StructValueType]
def _convert_ndarray_to_list(v: 'ArrayType'):
if v is not None:
return to_list(v)
class _NamedScore(BaseModel):
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
class _MetadataModel(BaseModel):
metadata: _MetadataType
class PydanticDocument(BaseModel):
id: Optional[str]
parent_id: Optional[str]
granularity: Optional[int]
adjacency: Optional[int]
blob: Optional[str]
tensor: Optional[Any]
mime_type: Optional[str]
text: Optional[str]
weight: Optional[float]
uri: Optional[str]
tags: Optional[Dict[str, '_StructValueType']]
_metadata: Optional[Dict[str, '_StructValueType']]
offset: Optional[float]
location: Optional[List[float]]
embedding: Optional[Any]
modality: Optional[str]
evaluations: Optional[Dict[str, '_NamedScore']]
scores: Optional[Dict[str, '_NamedScore']]
chunks: Optional[List['PydanticDocument']]
matches: Optional[List['PydanticDocument']]
_tensor2list = validator('tensor', allow_reuse=True)(_convert_ndarray_to_list)
_embedding2list = validator('embedding', allow_reuse=True)(_convert_ndarray_to_list)
class Config:
smart_union = True
def __init__(self, **data):
super().__init__(**data)
# underscore attributes need to be set and validated manually
_metadata = data.get('_metadata', None)
if _metadata is not None:
_md_model = _MetadataModel(metadata=_metadata) # validate _metadata
object.__setattr__(self, '_metadata', _md_model.metadata)
PydanticDocument.update_forward_refs()
PydanticDocumentArray = List[PydanticDocument]
|
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(clip_grad=None)
|
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model setting
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
bbox_head=dict(
norm_on_bbox=True,
centerness_on_reg=True,
dcn_on_last_conv=False,
center_sampling=True,
conv_bias=True,
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
test_cfg=dict(nms=dict(type='nms', iou_threshold=0.6)))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(clip_grad=None)
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processor
LOG_PROCESSORS = Registry('log_processor')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry, build_runner_from_cfg
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage log processor
LOG_PROCESSORS = Registry('log_processor')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage evaluator
EVALUATOR = Registry('evaluator')
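# Usage sketch (editorial addition, hypothetical `_DemoHook` class): modules
# register themselves in a root registry by name and are later built from a
# config dict whose `type` key selects the registered class.
if __name__ == '__main__':
    @HOOKS.register_module()
    class _DemoHook:
        def __init__(self, interval=1):
            self.interval = interval

    demo_hook = HOOKS.build(dict(type='_DemoHook', interval=10))
    assert demo_hook.interval == 10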
|
from ._transforms import BarkScale, BarkSpectrogram, ChromaScale, ChromaSpectrogram, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"ChromaScale",
"ChromaSpectrogram",
"InverseBarkScale",
]
|
from ._transforms import BarkScale, BarkSpectrogram, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"InverseBarkScale",
]
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model; it is not multilingual, but we hope to see multilingual ones on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
"""
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
"""
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTranslationEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
|
"""Tool for interacting with a single API with natural language definition."""
from __future__ import annotations
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests
class NLATool(Tool):
"""Natural Language API Tool."""
@classmethod
def from_open_api_endpoint_chain(
cls, chain: OpenAPIEndpointChain, api_title: str
) -> "NLATool":
"""Convert an endpoint chain to an API endpoint tool.
Args:
chain: The endpoint chain.
api_title: The title of the API.
Returns:
The API endpoint tool.
"""
expanded_name = (
f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}"
)
description = (
f"I'm an AI from {api_title}. Instruct what you want,"
" and I'll assist via an API with description:"
f" {chain.api_operation.description}"
)
return cls(name=expanded_name, func=chain.run, description=description)
@classmethod
def from_llm_and_method(
cls,
llm: BaseLanguageModel,
path: str,
method: str,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
**kwargs: Any,
) -> "NLATool":
"""Instantiate the tool from the specified path and method.
Args:
llm: The language model to use.
path: The path of the API.
method: The method of the API.
spec: The OpenAPI spec.
requests: Optional requests object. Default is None.
verbose: Whether to print verbose output. Default is False.
return_intermediate_steps: Whether to return intermediate steps.
Default is False.
kwargs: Additional arguments.
Returns:
The tool.
"""
api_operation = APIOperation.from_openapi_spec(spec, path, method)
chain = OpenAPIEndpointChain.from_api_operation(
api_operation,
llm,
requests=requests,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return cls.from_open_api_endpoint_chain(chain, spec.info.title)
|
"""Tool for interacting with a single API with natural language definition."""
from __future__ import annotations
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests
class NLATool(Tool): # type: ignore[override]
"""Natural Language API Tool."""
@classmethod
def from_open_api_endpoint_chain(
cls, chain: OpenAPIEndpointChain, api_title: str
) -> "NLATool":
"""Convert an endpoint chain to an API endpoint tool.
Args:
chain: The endpoint chain.
api_title: The title of the API.
Returns:
The API endpoint tool.
"""
expanded_name = (
f"{api_title.replace(' ', '_')}.{chain.api_operation.operation_id}"
)
description = (
f"I'm an AI from {api_title}. Instruct what you want,"
" and I'll assist via an API with description:"
f" {chain.api_operation.description}"
)
return cls(name=expanded_name, func=chain.run, description=description)
@classmethod
def from_llm_and_method(
cls,
llm: BaseLanguageModel,
path: str,
method: str,
spec: OpenAPISpec,
requests: Optional[Requests] = None,
verbose: bool = False,
return_intermediate_steps: bool = False,
**kwargs: Any,
) -> "NLATool":
"""Instantiate the tool from the specified path and method.
Args:
llm: The language model to use.
path: The path of the API.
method: The method of the API.
spec: The OpenAPI spec.
requests: Optional requests object. Default is None.
verbose: Whether to print verbose output. Default is False.
return_intermediate_steps: Whether to return intermediate steps.
Default is False.
kwargs: Additional arguments.
Returns:
The tool.
"""
api_operation = APIOperation.from_openapi_spec(spec, path, method)
chain = OpenAPIEndpointChain.from_api_operation(
api_operation,
llm,
requests=requests,
verbose=verbose,
return_intermediate_steps=return_intermediate_steps,
**kwargs,
)
return cls.from_open_api_endpoint_chain(chain, spec.info.title)
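# Naming sketch (editorial addition, hypothetical values): the tool name is the
# API title with spaces replaced by underscores, joined to the operation id.
if __name__ == "__main__":
    _api_title, _operation_id = "Speak Translator", "getTranslation"
    assert f"{_api_title.replace(' ', '_')}.{_operation_id}" == "Speak_Translator.getTranslation"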
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8429 Spearman: 0.8366
Model Sparsity: Active Dimensions: 78.3, Sparsity Ratio: 0.9974
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8366
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import os
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.openai.base import ChatMessage, MessageRole
from llama_index.llms.asi import ASI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in ASI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
@patch.dict(os.environ, {}, clear=True)
def test_initialization():
"""Test initialization with API key."""
# Test with explicit API key
llm = ASI(api_key="test_key")
assert llm.api_key == "test_key"
# Test with missing API key
with pytest.raises(
ValueError,
match="Must specify `api_key` or set environment variable `ASI_API_KEY`",
):
ASI(api_key=None)
@patch("llama_index.llms.asi.base.ASI.stream_complete")
def test_stream_complete(mock_stream_complete):
"""Test stream_complete method."""
# Create a mock response
mock_chunk = MagicMock()
mock_chunk.text = "Test response"
# Set up the mock to return our test chunk
mock_stream_complete.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_complete
stream = llm.stream_complete("Test prompt")
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].text == "Test response"
# Verify that stream_complete was called with the right arguments
mock_stream_complete.assert_called_once_with("Test prompt")
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_with_content(mock_stream_chat):
"""Test stream_chat method with content in delta."""
# Create a mock for the response
mock_chunk = MagicMock()
mock_chunk.delta = "Test content"
# Set up the mock to return our test chunk
mock_stream_chat.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].delta == "Test content"
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_empty_content(mock_stream_chat):
"""Test stream_chat method with empty content in delta."""
# Create mock chunks for the response
mock_chunk1 = MagicMock()
mock_chunk1.delta = "Thinking..."
mock_chunk2 = MagicMock()
mock_chunk2.delta = "Final response"
# Set up the mock to return our test chunks
mock_stream_chat.return_value = [mock_chunk1, mock_chunk2]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 2
assert chunks[0].delta == "Thinking..."
assert chunks[1].delta == "Final response"
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_init_thought(mock_stream_chat):
"""Test stream_chat method with init_thought field."""
# Create a mock for the response
mock_chunk = MagicMock()
mock_chunk.delta = "Initial thinking..."
# Set up the mock to return our test chunk
mock_stream_chat.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].delta == "Initial thinking..."
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
|
import os
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.openai.base import ChatMessage, MessageRole
from llama_index.llms.asi import ASI
def test_embedding_class():
names_of_base_classes = [b.__name__ for b in ASI.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
@patch.dict(os.environ, {}, clear=True)
def test_initialization():
"""Test initialization with API key."""
# Test with explicit API key
llm = ASI(api_key="test_key")
assert llm.api_key == "test_key"
# Test with missing API key
with pytest.raises(
ValueError,
match="Must specify `api_key` or set environment variable " "`ASI_API_KEY`",
):
ASI(api_key=None)
@patch("llama_index.llms.asi.base.ASI.stream_complete")
def test_stream_complete(mock_stream_complete):
"""Test stream_complete method."""
# Create a mock response
mock_chunk = MagicMock()
mock_chunk.text = "Test response"
# Set up the mock to return our test chunk
mock_stream_complete.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_complete
stream = llm.stream_complete("Test prompt")
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].text == "Test response"
# Verify that stream_complete was called with the right arguments
mock_stream_complete.assert_called_once_with("Test prompt")
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_with_content(mock_stream_chat):
"""Test stream_chat method with content in delta."""
# Create a mock for the response
mock_chunk = MagicMock()
mock_chunk.delta = "Test content"
# Set up the mock to return our test chunk
mock_stream_chat.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].delta == "Test content"
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_empty_content(mock_stream_chat):
"""Test stream_chat method with empty content in delta."""
# Create mock chunks for the response
mock_chunk1 = MagicMock()
mock_chunk1.delta = "Thinking..."
mock_chunk2 = MagicMock()
mock_chunk2.delta = "Final response"
# Set up the mock to return our test chunks
mock_stream_chat.return_value = [mock_chunk1, mock_chunk2]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 2
assert chunks[0].delta == "Thinking..."
assert chunks[1].delta == "Final response"
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
@patch("llama_index.llms.asi.base.ASI.stream_chat")
def test_stream_chat_init_thought(mock_stream_chat):
"""Test stream_chat method with init_thought field."""
# Create a mock for the response
mock_chunk = MagicMock()
mock_chunk.delta = "Initial thinking..."
# Set up the mock to return our test chunk
mock_stream_chat.return_value = [mock_chunk]
# Create the ASI instance
llm = ASI(api_key="test_key")
# Call stream_chat
messages = [ChatMessage(role=MessageRole.USER, content="Test message")]
stream = llm.stream_chat(messages)
# Check that we get the expected response
chunks = list(stream)
assert len(chunks) == 1
assert chunks[0].delta == "Initial thinking..."
# Verify that stream_chat was called with the right arguments
mock_stream_chat.assert_called_once_with(messages)
|
from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING: # pragma: no cover
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildLevel']):
"""Annotate a function so that it requires certain build level to run.
Example:
.. highlight:: python
.. code-block:: python
        @allowed_levels([FlowBuildLevel.RUNTIME])
def foo():
print(1)
:param levels: required build level to run this function.
:return: annotated function
"""
def __build_level(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if hasattr(self, '_build_level'):
if self._build_level in levels:
return func(self, *args, **kwargs)
else:
raise FlowBuildLevelError(
f'build_level check failed for {func!r}, required level: {levels}, actual level: {self._build_level}'
)
else:
raise AttributeError(f'{self!r} has no attribute "_build_level"')
return arg_wrapper
return __build_level
def _hanging_deployments(op_flow: 'Flow') -> List[str]:
"""
:param op_flow: the Flow we're operating on
:return: names of floating Deployments (nobody recv from them) in the Flow.
"""
all_needs = {k for p, v in op_flow for k in v.needs}
all_names = {p for p, v in op_flow if not v.args.floating}
# all_names is always a superset of all_needs
return list(all_names.difference(all_needs))
|
from functools import wraps
from typing import TYPE_CHECKING, List
from jina.excepts import FlowBuildLevelError
# noinspection PyUnreachableCode
if TYPE_CHECKING: # pragma: no cover
from jina.enums import FlowBuildLevel
from jina.orchestrate.flow.base import Flow
def allowed_levels(levels: List['FlowBuildLevel']):
"""Annotate a function so that it requires certain build level to run.
Example:
.. highlight:: python
.. code-block:: python
        @allowed_levels([FlowBuildLevel.RUNTIME])
def foo():
print(1)
:param levels: required build level to run this function.
:return: annotated function
"""
def __build_level(func):
@wraps(func)
def arg_wrapper(self, *args, **kwargs):
if hasattr(self, '_build_level'):
if self._build_level in levels:
return func(self, *args, **kwargs)
else:
raise FlowBuildLevelError(
f'build_level check failed for {func!r}, required level: {levels}, actual level: {self._build_level}'
)
else:
raise AttributeError(f'{self!r} has no attribute "_build_level"')
return arg_wrapper
return __build_level
def _hanging_deployments(op_flow: 'Flow') -> List[str]:
"""
:param op_flow: the Flow we're operating on
:return: names of floating Deployments (nobody recv from them) in the Flow.
"""
all_needs = {k for p, v in op_flow for k in v.needs}
all_names = {p for p, v in op_flow if not v.args.floating}
# all_names is always a superset of all_needs
return list(all_names.difference(all_needs))
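# Set-logic sketch (editorial addition, hypothetical deployment names): a
# deployment is "hanging"/floating when no other deployment lists it in `needs`.
if __name__ == '__main__':
    _all_needs = {'gateway', 'encoder'}
    _all_names = {'gateway', 'encoder', 'indexer'}
    assert list(_all_names.difference(_all_needs)) == ['indexer']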
|
from llama_index_instrumentation.span_handlers.simple import SimpleSpanHandler # noqa
|
import inspect
from typing import Any, Dict, cast, List, Optional, TYPE_CHECKING
from llama_index.core.instrumentation.span.simple import SimpleSpan
from llama_index.core.instrumentation.span_handlers.base import BaseSpanHandler
from datetime import datetime
from functools import reduce
import warnings
if TYPE_CHECKING:
from treelib import Tree
class SimpleSpanHandler(BaseSpanHandler[SimpleSpan]):
"""Span Handler that manages SimpleSpan's."""
    @classmethod
    def class_name(cls) -> str:
"""Class name."""
return "SimpleSpanHandler"
def new_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
parent_span_id: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> SimpleSpan:
"""Create a span."""
return SimpleSpan(id_=id_, parent_id=parent_span_id, tags=tags or {})
def prepare_to_exit_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
result: Optional[Any] = None,
**kwargs: Any,
) -> SimpleSpan:
"""Logic for preparing to drop a span."""
span = self.open_spans[id_]
span = cast(SimpleSpan, span)
span.end_time = datetime.now()
span.duration = (span.end_time - span.start_time).total_seconds()
with self.lock:
self.completed_spans += [span]
return span
def prepare_to_drop_span(
self,
id_: str,
bound_args: inspect.BoundArguments,
instance: Optional[Any] = None,
err: Optional[BaseException] = None,
**kwargs: Any,
) -> Optional[SimpleSpan]:
"""Logic for droppping a span."""
if id_ in self.open_spans:
with self.lock:
span = self.open_spans[id_]
span.metadata = {"error": str(err)}
self.dropped_spans += [span]
return span
return None
def _get_parents(self) -> List[SimpleSpan]:
"""Helper method to get all parent/root spans."""
all_spans = self.completed_spans + self.dropped_spans
return [s for s in all_spans if s.parent_id is None]
def _build_tree_by_parent(
self, parent: SimpleSpan, acc: List[SimpleSpan], spans: List[SimpleSpan]
) -> List[SimpleSpan]:
"""Builds the tree by parent root."""
if not spans:
return acc
children = [s for s in spans if s.parent_id == parent.id_]
if not children:
return acc
updated_spans = [s for s in spans if s not in children]
children_trees = [
self._build_tree_by_parent(
parent=c, acc=[c], spans=[s for s in updated_spans if c != s]
)
for c in children
]
return acc + reduce(lambda x, y: x + y, children_trees)
def _get_trace_trees(self) -> List["Tree"]:
"""Method for getting trace trees."""
try:
from treelib import Tree
except ImportError as e:
raise ImportError(
"`treelib` package is missing. Please install it by using "
"`pip install treelib`."
            ) from e
all_spans = self.completed_spans + self.dropped_spans
for s in all_spans:
if s.parent_id is None:
continue
if not any(ns.id_ == s.parent_id for ns in all_spans):
warnings.warn(f"Parent with id {s.parent_id} missing from spans")
s.parent_id += "-MISSING"
all_spans.append(SimpleSpan(id_=s.parent_id, parent_id=None))
parents = self._get_parents()
span_groups = []
for p in parents:
this_span_group = self._build_tree_by_parent(
parent=p, acc=[p], spans=[s for s in all_spans if s != p]
)
sorted_span_group = sorted(this_span_group, key=lambda x: x.start_time)
span_groups.append(sorted_span_group)
trees = []
tree = Tree()
for grp in span_groups:
for span in grp:
if span.parent_id is None:
                # complete the old tree unless it's empty (i.e., at the start of the loop)
if tree.all_nodes():
trees.append(tree)
# start new tree
tree = Tree()
tree.create_node(
tag=f"{span.id_} ({span.duration})",
identifier=span.id_,
parent=span.parent_id,
data=span.start_time,
)
trees.append(tree)
return trees
def print_trace_trees(self) -> None:
"""Method for viewing trace trees."""
trees = self._get_trace_trees()
for tree in trees:
print(tree.show(stdout=False, sorting=True, key=lambda node: node.data))
print("")
|
from __future__ import annotations
__version__ = "5.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.sampler import DefaultBatchSampler, MultiDatasetDefaultBatchSampler
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"DefaultBatchSampler",
"MultiDatasetDefaultBatchSampler",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.2.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
import warnings
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.sampler import DefaultBatchSampler, MultiDatasetDefaultBatchSampler
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
# Globally silence PyTorch sparse CSR tensor beta warning
warnings.filterwarnings("ignore", message="Sparse CSR tensor support is in beta state")
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"DefaultBatchSampler",
"MultiDatasetDefaultBatchSampler",
"mine_hard_negatives",
]
|
from jina import DocumentArray, Flow
from ...clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
from jina import DocumentArray, Flow
from jinahub.encoder.clip_text import CLIPTextEncoder
def test_no_documents():
test_docs = DocumentArray()
f = Flow().add(uses=CLIPTextEncoder)
with f:
f.search(test_docs, {})
assert len(test_docs) == 0 # SUCCESS
|
import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
_EXTENSION_TO_MODULE = {
"csv": ("csv", {}),
"tsv": ("csv", {"sep": "\t"}),
"json": ("json", {}),
"jsonl": ("json", {}),
"parquet": ("parquet", {}),
"txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext[1:]: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:].upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:]: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:].upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
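# Illustrative check (assumption: for demonstration only, not part of the module):
# full-line comments and blank lines do not affect the hash used for caching.
if __name__ == "__main__":
    with_comments = _hash_python_lines(["# a comment", "x = 1", "", "y = 2"])
    without_comments = _hash_python_lines(["x = 1", "y = 2"])
    assert with_comments == without_comments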
|
import inspect
import re
from hashlib import sha256
from typing import List
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
_EXTENSION_TO_MODULE = {
"csv": ("csv", {}),
"tsv": ("csv", {"sep": "\t"}),
"json": ("json", {}),
"jsonl": ("json", {}),
"parquet": ("parquet", {}),
"txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext[1:]: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:].upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:]: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext[1:].upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
# build the model from a config file and a checkpoint file
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
name='result',
image=img,
data_sample=result,
draw_gt=False,
pred_score_thr=args.score_thr,
show=False)
img = visualizer.get_image()
img = mmcv.imconvert(img, 'bgr', 'rgb')
cv2.imshow('result', img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
name='result',
image=img,
data_sample=result,
draw_gt=False,
pred_score_thr=args.score_thr,
show=False)
img = visualizer.get_image()
img = mmcv.imconvert(img, 'bgr', 'rgb')
cv2.imshow('result', img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
if __name__ == '__main__':
main()
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseMSEEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given aligned source and target sentences, the SparseMSEEvaluator computes the MSE between the student and teacher embeddings (lower is better).
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseMSEEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given aligned source and target sentences, the SparseMSEEvaluator computes the MSE between the student and teacher embeddings.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
|
__all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the index according to the given filter query.
Filter queries use the same syntax as the MongoDB query language (https://www.mongodb.com/docs/manual/tutorial/query-documents/#specify-conditions-using-query-operators).
You can see a list of the supported operators here (https://www.mongodb.com/docs/manual/reference/operator/query/#std-label-query-selectors)
---
```python
from docarray import DocList, BaseDoc
from docarray.documents import TextDoc, ImageDoc
from docarray.utils.filter import filter_docs
class MyDocument(BaseDoc):
caption: TextDoc
ImageDoc: ImageDoc
price: int
docs = DocList[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
ImageDoc=ImageDoc(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle',
ImageDoc=ImageDoc(url='turtlepic.png'),
price=50,
),
MyDocument(
caption='A couple birdwatching with binoculars',
ImageDoc=ImageDoc(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'ImageDoc__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter_docs(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].ImageDoc.url == 'binocularsphoto.png'
```
---
:param docs: the DocList where to apply the filter
:param query: the query to filter by
:return: A DocList containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils._internal.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocList.__class_getitem__(docs.doc_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
__all__ = ['filter_docs']
import json
from typing import Dict, List, Union
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
def filter_docs(
docs: AnyDocArray,
query: Union[str, Dict, List[Dict]],
) -> AnyDocArray:
"""
Filter the Documents in the index according to the given filter query.
---
```python
from docarray import DocList, BaseDoc
from docarray.documents import TextDoc, ImageDoc
from docarray.utils.filter import filter_docs
class MyDocument(BaseDoc):
caption: TextDoc
ImageDoc: ImageDoc
price: int
docs = DocList[MyDocument](
[
MyDocument(
caption='A tiger in the jungle',
ImageDoc=ImageDoc(url='tigerphoto.png'),
price=100,
),
MyDocument(
caption='A swimming turtle',
ImageDoc=ImageDoc(url='turtlepic.png'),
price=50,
),
MyDocument(
caption='A couple birdwatching with binoculars',
ImageDoc=ImageDoc(url='binocularsphoto.png'),
price=30,
),
]
)
query = {
'$and': {
'ImageDoc__url': {'$regex': 'photo'},
'price': {'$lte': 50},
}
}
results = filter_docs(docs, query)
assert len(results) == 1
assert results[0].price == 30
assert results[0].caption == 'A couple birdwatching with binoculars'
assert results[0].ImageDoc.url == 'binocularsphoto.png'
```
---
:param docs: the DocList where to apply the filter
:param query: the query to filter by
:return: A DocList containing the Documents
in `docs` that fulfill the filter conditions in the `query`
"""
from docarray.utils._internal.query_language.query_parser import QueryParser
if query:
query = query if not isinstance(query, str) else json.loads(query)
parser = QueryParser(query)
return DocList.__class_getitem__(docs.doc_type)(
d for d in docs if parser.evaluate(d)
)
else:
return docs
|
import os
from pathlib import Path
import pytest
from jina import Document, DocumentArray, Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_label': 'relevance',
},
)
assert ranker.query_features == ['query']
assert ranker.match_features == ['match']
def test_dump_load_path_not_exist(ranker):
with pytest.raises(ValueError):
ranker.dump(parameters={})
ranker.load(parameters={})
def test_train(ranker, documents_to_train_price_sensitive_model):
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
def test_train_with_categorical_features(
ranker_with_categorical_features, documents_to_train_price_sensitive_model
):
"""Weight field specify the importance of the features."""
ranker_with_categorical_features.train(
docs=documents_to_train_price_sensitive_model
)
assert ranker_with_categorical_features.booster
def test_dump_load(ranker, documents_to_train_price_sensitive_model, tmpdir):
model_path = str(tmpdir) + 'model.txt'
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.dump(parameters={'model_path': model_path})
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.booster
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_random_brand,
):
"""train the model using price sensitive data, assure higher price get lower relevance score."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.rank(documents_random_brand)
for doc in documents_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices ordered from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
def test_rank_empty_docs(ranker):
da = DocumentArray([])
ranker.rank(da)
assert len(da) == 0
def test_train_empty_docs(ranker):
da = DocumentArray([])
ranker.train(da)
assert len(da) == 0
def test_rank_none(ranker):
ranker.rank(None)
def test_train_none(ranker):
ranker.train(None)
|
import os
from pathlib import Path
import pytest
from jina import Executor
def test_config():
ranker = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'query_features': ['query'],
'match_features': ['match'],
'relevance_label': 'relevance',
},
)
assert ranker.query_features == ['query']
assert ranker.match_features == ['match']
def test_dump_load_path_not_exist(ranker):
with pytest.raises(ValueError):
ranker.dump(parameters={})
ranker.load(parameters={})
def test_train(ranker, documents_to_train_price_sensitive_model):
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
def test_train_with_categorical_features(
ranker_with_categorical_features, documents_to_train_price_sensitive_model
):
"""Weight field specify the importance of the features."""
ranker_with_categorical_features.train(
docs=documents_to_train_price_sensitive_model
)
assert ranker_with_categorical_features.booster
def test_dump_load(ranker, documents_to_train_price_sensitive_model, tmpdir):
model_path = str(tmpdir) + 'model.txt'
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.dump(parameters={'model_path': model_path})
assert os.path.exists(model_path)
ranker.load({'model_path': model_path})
assert ranker.booster
def test_rank_price_sensitive_model(
ranker,
documents_to_train_price_sensitive_model,
documents_random_brand,
):
"""train the model using price sensitive data, assure higher price get lower relevance score."""
ranker.train(docs=documents_to_train_price_sensitive_model)
assert ranker.booster
ranker.rank(documents_random_brand)
for doc in documents_random_brand:
predicted_relevances = []
predicted_ids = []
        expected_ids = ['3', '4', '2', '1']  # Prices ordered from smallest to largest.
for match in doc.matches:
predicted_relevances.append(match.scores.get('relevance').value)
predicted_ids.append(match.id)
assert (
predicted_relevances[0]
>= predicted_relevances[1]
>= predicted_relevances[2]
)
assert predicted_ids == expected_ids
|
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch
from torch._dynamo.precompile_context import PrecompileContext
from torch._functorch import config as functorch_config
from torch._functorch._aot_autograd.autograd_cache import (
BundledAOTAutogradCacheArtifact,
)
from torch._inductor.test_case import TestCase as InductorTestCase
from torch.testing._internal.inductor_utils import GPU_TYPE, requires_triton
@functorch_config.patch({"enable_autograd_cache": True})
@functorch_config.patch(
{"bundled_autograd_cache": True}
)  # Requires the bundled AOTAutograd cache for now
class PrecompileContextTests(InductorTestCase):
def setUp(self):
"""
Reset all counters and caches before each unit test
"""
super().setUp()
# Clear PrecompileContext cache artifacts
PrecompileContext.clear()
@requires_triton()
def test_basic(self):
"""
        Test that after torch.compile, PrecompileContext._new_cache_artifacts_by_key has length 1
"""
def simple_function(x):
return x.sin() + x.cos()
compiled_fn = torch.compile(simple_function)
# Run the compiled function
x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
result = compiled_fn(x)
result.sum().backward()
# Check that PrecompileContext._new_cache_artifacts_by_key has length 1
self.assertEqual(len(PrecompileContext._new_cache_artifacts_by_key), 1)
self.assertEqual(len(PrecompileContext._new_cache_artifacts), 0)
result = PrecompileContext.serialize()
assert result is not None
serialized, cache_info = result
self.assertEqual(len(cache_info.precompile_aot_autograd_artifacts), 1)
artifacts = PrecompileContext.deserialize(serialized)
assert artifacts is not None
deserialized = artifacts["precompile_aot_autograd"]
assert len(deserialized) == 1
entry = deserialized[0]
assert isinstance(entry, BundledAOTAutogradCacheArtifact)
entry = entry.after_deserialization()
# Now that we've serialized, there should be no new cache artifacts
self.assertEqual(
len(PrecompileContext._new_cache_artifacts["precompile_aot_autograd"]), 0
)
@requires_triton()
def test_serialize_by_key(self):
"""
        Test that a single cache artifact can be serialized and retrieved by its key after torch.compile
"""
def simple_function(x):
return x.sin() + x.cos()
compiled_fn = torch.compile(simple_function)
# Run the compiled function
x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
result = compiled_fn(x)
result.sum().backward()
# Check that PrecompileContext._new_cache_artifacts_by_key has length 1
# TODO: the key right now is the AOTAutogradCacheKey, but will be backend_id once
# we have torch._dynamo.package implemented
self.assertEqual(len(PrecompileContext._new_cache_artifacts_by_key), 1)
key = next(iter(PrecompileContext._new_cache_artifacts_by_key.keys()))
result = PrecompileContext.serialize_artifact_by_key(key)
assert isinstance(result, BundledAOTAutogradCacheArtifact)
self.assertEqual(result.key, key)
self.assertEqual(len(PrecompileContext._new_cache_artifacts), 0)
result = PrecompileContext.serialize()
assert result is not None
_, cache_info = result
self.assertEqual(len(cache_info.precompile_aot_autograd_artifacts), 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch
from torch._dynamo.precompile_context import PrecompileContext
from torch._functorch import config as functorch_config
from torch._functorch._aot_autograd.autograd_cache import (
BundledAOTAutogradCacheArtifact,
BundledAOTAutogradCacheEntry,
)
from torch._inductor.test_case import TestCase as InductorTestCase
from torch.testing._internal.inductor_utils import GPU_TYPE, requires_triton
@functorch_config.patch({"enable_autograd_cache": True})
@functorch_config.patch(
{"bundled_autograd_cache": True}
)  # Requires the bundled AOTAutograd cache for now
class PrecompileContextTests(InductorTestCase):
def setUp(self):
"""
Reset all counters and caches before each unit test
"""
super().setUp()
# Clear PrecompileContext cache artifacts
PrecompileContext.clear()
@requires_triton()
def test_basic(self):
"""
        Test that after torch.compile, PrecompileContext._new_cache_artifacts_by_key has length 1
"""
def simple_function(x):
return x.sin() + x.cos()
compiled_fn = torch.compile(simple_function)
# Run the compiled function
x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
result = compiled_fn(x)
result.sum().backward()
# Check that PrecompileContext._new_cache_artifacts_by_key has length 1
self.assertEqual(len(PrecompileContext._new_cache_artifacts_by_key), 1)
self.assertEqual(len(PrecompileContext._new_cache_artifacts), 0)
result = PrecompileContext.serialize()
assert result is not None
serialized, cache_info = result
self.assertEqual(len(cache_info.precompile_aot_autograd_artifacts), 1)
artifacts = PrecompileContext.deserialize(serialized)
assert artifacts is not None
deserialized = artifacts["precompile_aot_autograd"]
assert len(deserialized) == 1
entry = deserialized[0]
assert isinstance(entry, BundledAOTAutogradCacheArtifact)
entry = entry.after_deserialization()
assert isinstance(
entry,
BundledAOTAutogradCacheEntry,
)
# Now that we've serialized, there should be no new cache artifacts
self.assertEqual(
len(PrecompileContext._new_cache_artifacts["precompile_aot_autograd"]), 0
)
@requires_triton()
def test_serialize_by_key(self):
"""
        Test that a single cache artifact can be serialized and retrieved by its key after torch.compile
"""
def simple_function(x):
return x.sin() + x.cos()
compiled_fn = torch.compile(simple_function)
# Run the compiled function
x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
result = compiled_fn(x)
result.sum().backward()
# Check that PrecompileContext._new_cache_artifacts_by_key has length 1
# TODO: the key right now is the AOTAutogradCacheKey, but will be backend_id once
# we have torch._dynamo.package implemented
self.assertEqual(len(PrecompileContext._new_cache_artifacts_by_key), 1)
key = next(iter(PrecompileContext._new_cache_artifacts_by_key.keys()))
result = PrecompileContext.serialize_artifact_by_key(key)
assert isinstance(result, BundledAOTAutogradCacheArtifact)
self.assertEqual(result.key, key)
self.assertEqual(len(PrecompileContext._new_cache_artifacts), 0)
result = PrecompileContext.serialize()
assert result is not None
_, cache_info = result
self.assertEqual(len(cache_info.precompile_aot_autograd_artifacts), 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
_base_ = './gfl_r50_fpn_ms-2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './gfl_r50_fpn_mstrain_2x_coco.py'
model = dict(
type='GFL',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
gp.add_argument(
'--no-reduce',
'--disable-reduce',
action='store_true',
default=False,
help='Disable the built-in reduction mechanism. Set this if the reduction is to be handled by the Executor itself by operating on a `docs_matrix` or `docs_map`',
)
mixin_base_runtime_parser(gp)
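# Minimal usage sketch (assumptions: jina is installed and 'MyExecutor' is a made-up
# name used purely for illustration). It shows the mixin above populating a plain
# argparse parser, which then parses the `--uses` option documented in the help text.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    mixin_worker_runtime_parser(parser)
    args = parser.parse_args(['--uses', 'MyExecutor'])
    print(args.uses)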
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
        The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
        When used from Python, the following values are additionally supported:
        - a Python dict that represents the config
        - a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
mixin_base_runtime_parser(gp)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, Flow
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (768,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Flow, Document
def data_generator(num_docs):
for i in range(num_docs):
doc = Document(
text='it is a good day! the dog sits on the floor.')
yield doc
def test_use_in_flow():
with Flow.load_config('flow.yml') as flow:
resp = flow.post(on='/encode', inputs=data_generator(5), return_results=True)
docs = resp[0].docs
assert len(docs) == 5
for doc in docs:
assert doc.embedding.shape == (768,)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from mmdet.registry import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
PALETTE = [(0, 255, 0)]
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive as ExportArchive
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, hidden_states):
batch, height, width, channels = hidden_states.shape
hidden_states = jax.image.resize(
hidden_states,
shape=(batch, height * 2, width * 2, channels),
method="nearest",
)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxDownsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(2, 2),
padding=((1, 1), (1, 1)), # padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxResnetBlock2D(nn.Module):
in_channels: int
out_channels: int = None
dropout_prob: float = 0.0
use_nin_shortcut: bool = None
dtype: jnp.dtype = jnp.float32
def setup(self):
out_channels = self.in_channels if self.out_channels is None else self.out_channels
self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.conv1 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.dropout = nn.Dropout(self.dropout_prob)
self.conv2 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
self.conv_shortcut = None
if use_nin_shortcut:
self.conv_shortcut = nn.Conv(
out_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states, temb, deterministic=True):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.conv1(hidden_states)
temb = self.time_emb_proj(nn.swish(temb))
temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
residual = self.conv_shortcut(residual)
return hidden_states + residual
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
def __call__(self, hidden_states):
batch, height, width, channels = hidden_states.shape
hidden_states = jax.image.resize(
hidden_states,
shape=(batch, height * 2, width * 2, channels),
method="nearest",
)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxDownsample2D(nn.Module):
out_channels: int
dtype: jnp.dtype = jnp.float32
def setup(self):
self.conv = nn.Conv(
self.out_channels,
kernel_size=(3, 3),
strides=(2, 2),
padding=((1, 1), (1, 1)), # padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states):
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
hidden_states = self.conv(hidden_states)
return hidden_states
class FlaxResnetBlock2D(nn.Module):
in_channels: int
out_channels: int = None
dropout_prob: float = 0.0
use_nin_shortcut: bool = None
dtype: jnp.dtype = jnp.float32
def setup(self):
out_channels = self.in_channels if self.out_channels is None else self.out_channels
self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.conv1 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
self.dropout = nn.Dropout(self.dropout_prob)
self.conv2 = nn.Conv(
out_channels,
kernel_size=(3, 3),
strides=(1, 1),
padding=((1, 1), (1, 1)),
dtype=self.dtype,
)
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
self.conv_shortcut = None
if use_nin_shortcut:
self.conv_shortcut = nn.Conv(
out_channels,
kernel_size=(1, 1),
strides=(1, 1),
padding="VALID",
dtype=self.dtype,
)
def __call__(self, hidden_states, temb, deterministic=True):
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.conv1(hidden_states)
temb = self.time_emb_proj(nn.swish(temb))
temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states)
hidden_states = nn.swish(hidden_states)
hidden_states = self.dropout(hidden_states, deterministic)
hidden_states = self.conv2(hidden_states)
if self.conv_shortcut is not None:
residual = self.conv_shortcut(residual)
return hidden_states + residual
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import AzureChatOpenAI
class TestOpenAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": "test",
"openai_api_version": "2021-10-01",
"azure_endpoint": "https://test.azure.com",
}
@pytest.mark.xfail(reason="AzureOpenAI does not support tool_choice='any'")
def test_bind_tool_pydantic(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_bind_tool_pydantic(model, my_adder_tool)
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"AZURE_OPENAI_API_KEY": "api_key",
"AZURE_OPENAI_ENDPOINT": "https://endpoint.com",
"AZURE_OPENAI_AD_TOKEN": "token",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_VERSION": "yyyy-mm-dd",
"OPENAI_API_TYPE": "type",
},
{},
{
"openai_api_key": "api_key",
"azure_endpoint": "https://endpoint.com",
"azure_ad_token": "token",
"openai_organization": "org_id",
"openai_api_version": "yyyy-mm-dd",
"openai_api_type": "type",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.tools import BaseTool
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import AzureChatOpenAI
class TestOpenAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return AzureChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"deployment_name": "test",
"openai_api_version": "2021-10-01",
"azure_endpoint": "https://test.azure.com",
}
@pytest.mark.xfail(reason="AzureOpenAI does not support tool_choice='any'")
def test_bind_tool_pydantic(
self, model: BaseChatModel, my_adder_tool: BaseTool
) -> None:
super().test_bind_tool_pydantic(model, my_adder_tool)
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"AZURE_OPENAI_API_KEY": "api_key",
"AZURE_OPENAI_ENDPOINT": "https://endpoint.com",
"AZURE_OPENAI_AD_TOKEN": "token",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_VERSION": "yyyy-mm-dd",
"OPENAI_API_TYPE": "type",
},
{},
{
"openai_api_key": "api_key",
"azure_endpoint": "https://endpoint.com",
"azure_ad_token": "token",
"openai_organization": "org_id",
"openai_api_version": "yyyy-mm-dd",
"openai_api_type": "type",
},
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available, is_musa_available,
is_npu_available)
def test_get_device():
device = get_device()
if is_npu_available():
assert device == 'npu'
elif is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
elif is_mps_available():
assert device == 'mps'
elif is_musa_available():
assert device == 'musa'
else:
assert device == 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.device import (get_device, is_cuda_available, is_mlu_available,
is_mps_available, is_npu_available)
def test_get_device():
device = get_device()
if is_npu_available():
assert device == 'npu'
elif is_cuda_available():
assert device == 'cuda'
elif is_mlu_available():
assert device == 'mlu'
elif is_mps_available():
assert device == 'mps'
else:
assert device == 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_dipu_available, is_mlu_available, is_mps_available,
is_npu_available, is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available', 'is_npu_available',
'is_dipu_available', 'is_npu_support_full_precision'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import (get_device, get_max_cuda_memory, is_cuda_available,
is_mlu_available, is_mps_available, is_npu_available,
is_npu_support_full_precision)
__all__ = [
'get_max_cuda_memory', 'get_device', 'is_cuda_available',
'is_mlu_available', 'is_mps_available', 'is_npu_available',
'is_npu_support_full_precision'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import BeautifulSoupTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BeautifulSoupTransformer": "langchain_community.document_transformers",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BeautifulSoupTransformer",
]
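# Illustrative sketch (assumptions: langchain-community is installed; shown only to
# demonstrate the dynamic lookup above, not part of this shim). Resolving the
# deprecated name goes through the importer created from DEPRECATED_LOOKUP.
if __name__ == "__main__":
    transformer_cls = __getattr__("BeautifulSoupTransformer")
    print(transformer_cls.__module__)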
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_transformers import BeautifulSoupTransformer
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BeautifulSoupTransformer": "langchain_community.document_transformers"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BeautifulSoupTransformer",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import AspectRatioBatchSampler
from .class_aware_sampler import ClassAwareSampler
__all__ = ['ClassAwareSampler', 'AspectRatioBatchSampler']
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-order
gradient-based optimization method. Due to its capability of adjusting the
    learning rate based on data characteristics, it is well suited to learning
    time-variant processes, e.g., speech data with dynamically changing noise
    conditions. Default parameters follow those provided in the paper (see
references below).
Initialization:
```python
m = 0 # Initialize initial 1st moment vector
u = 0 # Initialize the exponentially weighted infinity norm
t = 0 # Initialize timestep
```
The update rule for parameter `w` with gradient `g` is described at the end
of section 7.1 of the paper (see the reference section):
```python
t += 1
    m = beta1 * m + (1 - beta1) * g
u = max(beta2 * u, abs(g))
current_lr = learning_rate / (1 - beta1 ** t)
w = w - current_lr * m / (u + epsilon)
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamax",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Adamax optimizer has 2 types of variables: momentums (denoted as m),
exponentially weighted infinity norm (denoted as u).
Args:
var_list: list of model variables to build Adamax variables on.
"""
if self.built:
return
super().build(var_list)
self._m, self._u = self.add_optimizer_variables(
var_list, ["momentum", "norm"]
)
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
m = self._m[self._get_variable_index(variable)]
u = self._u[self._get_variable_index(variable)]
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), (1 - self.beta_1))
)
self.assign(
u, ops.maximum(ops.multiply(self.beta_2, u), ops.abs(gradient))
)
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, m),
ops.multiply((1 - beta_1_power), ops.add(u, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Adamax.__doc__ = Adamax.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
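# Minimal usage sketch (hedged): the model below is illustrative and not part of
# this module; any Keras model accepts this optimizer via `compile`.
#
#   import keras
#
#   model = keras.Sequential([keras.layers.Dense(1)])
#   model.compile(optimizer=keras.optimizers.Adamax(learning_rate=0.002), loss="mse")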
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer
@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
Adamax, a variant of Adam based on the infinity norm, is a first-order
gradient-based optimization method. Due to its capability of adjusting the
learning rate based on data characteristics, it is suited to learning
time-variant processes, e.g., speech data with dynamically changing noise
conditions. Default parameters follow those provided in the paper (see
references below).
Initialization:
```python
m = 0 # Initialize the 1st moment vector
u = 0 # Initialize the exponentially weighted infinity norm
t = 0 # Initialize timestep
```
The update rule for parameter `w` with gradient `g` is described at the end
of section 7.1 of the paper (see the reference section):
```python
t += 1
m = beta1 * m + (1 - beta1) * g
u = max(beta2 * u, abs(g))
current_lr = learning_rate / (1 - beta1 ** t)
w = w - current_lr * m / (u + epsilon)
```
Args:
learning_rate: A float, a
`keras.optimizers.schedules.LearningRateSchedule` instance, or
a callable that takes no arguments and returns the actual value to
use. The learning rate. Defaults to `0.001`.
beta_1: A float value or a constant float tensor. The exponential decay
rate for the 1st moment estimates.
beta_2: A float value or a constant float tensor. The exponential decay
rate for the exponentially weighted infinity norm.
epsilon: A small constant for numerical stability.
{{base_optimizer_keyword_args}}
Reference:
- [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
weight_decay=None,
clipnorm=None,
clipvalue=None,
global_clipnorm=None,
use_ema=False,
ema_momentum=0.99,
ema_overwrite_frequency=None,
loss_scale_factor=None,
gradient_accumulation_steps=None,
name="adamax",
**kwargs,
):
super().__init__(
learning_rate=learning_rate,
name=name,
weight_decay=weight_decay,
clipnorm=clipnorm,
clipvalue=clipvalue,
global_clipnorm=global_clipnorm,
use_ema=use_ema,
ema_momentum=ema_momentum,
ema_overwrite_frequency=ema_overwrite_frequency,
loss_scale_factor=loss_scale_factor,
gradient_accumulation_steps=gradient_accumulation_steps,
**kwargs,
)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
def build(self, var_list):
"""Initialize optimizer variables.
Adamax optimizer has 2 types of variables: momentums (denoted as m),
exponentially weighted infinity norm (denoted as u).
Args:
var_list: list of model variables to build Adamax variables on.
"""
if self.built:
return
super().build(var_list)
self._m = self.add_optimizer_variables(var_list, "momentum")
self._u = self.add_optimizer_variables(var_list, "norm")
def update_step(self, gradient, variable, learning_rate):
"""Update step given gradient and the associated model variable."""
lr = ops.cast(learning_rate, variable.dtype)
gradient = ops.cast(gradient, variable.dtype)
local_step = ops.cast(self.iterations + 1, variable.dtype)
beta_1_power = ops.power(
ops.cast(self.beta_1, variable.dtype), local_step
)
m = self._m[self._get_variable_index(variable)]
u = self._u[self._get_variable_index(variable)]
self.assign_add(
m, ops.multiply(ops.subtract(gradient, m), (1 - self.beta_1))
)
self.assign(
u, ops.maximum(ops.multiply(self.beta_2, u), ops.abs(gradient))
)
self.assign_sub(
variable,
ops.divide(
ops.multiply(lr, m),
ops.multiply((1 - beta_1_power), ops.add(u, self.epsilon)),
),
)
def get_config(self):
config = super().get_config()
config.update(
{
"beta_1": self.beta_1,
"beta_2": self.beta_2,
"epsilon": self.epsilon,
}
)
return config
Adamax.__doc__ = Adamax.__doc__.replace(
"{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
|
import os
import time
import pytest
import requests as general_requests
from jina import Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_images_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'executor1'), tag='encoder-executor')
client.images.build(path=os.path.join(cur_dir, 'executor2'), tag='indexer-executor')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
def test_flow_with_docker(executor_images_built, protocol):
from docarray import BaseDoc, DocList
from typing import Optional, List
from docarray.typing import NdArray
class MyDoc(BaseDoc):
text: str
embedding: Optional[NdArray] = None
class MyDocWithMatches(MyDoc):
matches: DocList[MyDoc] = []
scores: List[float] = []
f = (
Flow(protocol=protocol)
.add(uses='docker://encoder-executor')
.add(uses='docker://indexer-executor')
)
with f:
if protocol == 'http':
resp = general_requests.get(f'http://localhost:{f.port}/openapi.json')
resp.json()
sentences = [
'This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.',
]
inputs = DocList[MyDoc]([MyDoc(text=sentence) for sentence in sentences])
f.post(on='/index', inputs=inputs)
queries = inputs[0:2]
search_results = f.post(
on='/search', inputs=queries, return_type=DocList[MyDocWithMatches]
)
assert len(search_results) == len(queries)
for result in search_results:
assert result.text in sentences
assert len(result.matches) == len(sentences)
for m in result.matches:
assert m.text in sentences
|
import os
import time
import pytest
import requests as general_requests
from jina import Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def executor_images_built():
import docker
client = docker.from_env()
client.images.build(path=os.path.join(cur_dir, 'executor1'), tag='encoder-executor')
client.images.build(path=os.path.join(cur_dir, 'executor2'), tag='indexer-executor')
client.close()
yield
time.sleep(2)
client = docker.from_env()
client.containers.prune()
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
def test_flow_with_docker(executor_images_built, protocol):
from docarray import BaseDoc, DocList
from typing import Optional, List
from docarray.typing import NdArray
class MyDoc(BaseDoc):
text: str
embedding: Optional[NdArray] = None
class MyDocWithMatches(MyDoc):
matches: DocList[MyDoc] = []
scores: List[float] = []
f = Flow(protocol=protocol).add(uses='docker://encoder-executor').add(uses='docker://indexer-executor')
with f:
if protocol == 'http':
resp = general_requests.get(f'http://localhost:{f.port}/openapi.json')
resp.json()
sentences = ['This framework generates embeddings for each input sentence',
'Sentences are passed as a list of string.',
'The quick brown fox jumps over the lazy dog.']
inputs = DocList[MyDoc]([MyDoc(text=sentence) for sentence in sentences])
f.post(on='/index', inputs=inputs)
queries = inputs[0:2]
search_results = f.post(on='/search', inputs=queries, return_type=DocList[MyDocWithMatches])
assert len(search_results) == len(queries)
for result in search_results:
assert result.text in sentences
assert len(result.matches) == len(sentences)
for m in result.matches:
assert m.text in sentences
|
from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
from .download import download_asset
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
__all__ = [
"download_asset",
"sox_utils",
]
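# Hedged usage sketch (the asset key below is illustrative, not guaranteed to exist):
#
#   path = download_asset("tutorial-assets/example.wav")  # downloads once, returns a local path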
|
from torchaudio._internal import module_utils as _mod_utils
from . import (
sox_utils,
)
if _mod_utils.is_sox_available():
sox_utils.set_verbosity(1)
|
from typing import Any, Dict
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# This if/else statement needs to be refactored; it is too long.
# The check should be delegated to the type level.
if content_type == 'tensor':
fields[field] = Tensor._read_from_proto(value.tensor)
elif content_type == 'embedding':
fields[field] = Embedding._read_from_proto(value.embedding)
elif content_type == 'any_url':
fields[field] = parse_obj_as(AnyUrl, value.any_url)
elif content_type == 'image_url':
fields[field] = parse_obj_as(ImageUrl, value.image_url)
elif content_type == 'id':
fields[field] = parse_obj_as(ID, value.id)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
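# Hedged round-trip sketch: `MyDoc` below is a hypothetical Document subclass
# (i.e. one that mixes in ProtoMixin) with a single `text` field.
#
#   doc = MyDoc(text='hello')
#   pb_msg = doc.to_protobuf()              # DocumentProto
#   restored = MyDoc.from_protobuf(pb_msg)  # deserializes field by field
#   assert restored.text == doc.text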
|
from typing import Any, Dict, Type
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import Tensor
from ..abstract_document import AbstractDocument
from ..base_node import BaseNode
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
"""
Access the nested Python class defined in the schema. Could be useful for
reconstruction of a Document in serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
if content_type == 'tensor':
fields[field] = Tensor.read_ndarray(value.tensor)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`
:return: the new Document class
```python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, Audio)
```
"""
if not issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, Audio)
"""
if '__base__' in kwargs:
if not issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
field: (type(value) if value else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
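# Hedged sketch of the None handling documented above: a field whose example
# value is None is typed as Any (field names and values below are illustrative).
#
#   ExampleDoc = create_doc_from_dict(
#       model_name='ExampleDoc',
#       data_dict={'title': 'hello', 'payload': None},
#   )
#   # 'title' is typed as str, 'payload' falls back to typing.Any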
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, Audio)
"""
if not issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, Audio)
"""
if '__base__' in kwargs:
if not issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
field: (type(value) if value else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
|
import json
from typing import Any, Callable, Optional, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonEditDistanceEvaluator(StringEvaluator):
"""
An evaluator that calculates the edit distance between JSON strings.
This evaluator computes a normalized Damerau-Levenshtein distance between two JSON strings
after parsing them and converting them to a canonical format (i.e., whitespace and key order are normalized).
It can be customized with alternative distance and canonicalization functions.
Args:
string_distance (Optional[Callable[[str, str], float]]): A callable that computes the distance between two strings.
If not provided, a Damerau-Levenshtein distance from the `rapidfuzz` package will be used.
canonicalize (Optional[Callable[[Any], Any]]): A callable that converts a parsed JSON object into its canonical string form.
If not provided, the default behavior is to serialize the JSON with sorted keys and no extra whitespace.
**kwargs (Any): Additional keyword arguments.
Attributes:
_string_distance (Callable[[str, str], float]): The internal distance computation function.
_canonicalize (Callable[[Any], Any]): The internal canonicalization function.
Examples:
>>> evaluator = JsonEditDistanceEvaluator()
>>> result = evaluator.evaluate_strings(prediction='{"a": 1, "b": 2}', reference='{"a": 1, "b": 3}')
>>> assert result["score"] is not None
Raises:
ImportError: If `rapidfuzz` is not installed and no alternative `string_distance` function is provided.
""" # noqa: E501
def __init__(
self,
string_distance: Optional[Callable[[str, str], float]] = None,
canonicalize: Optional[Callable[[Any], Any]] = None,
**kwargs: Any,
) -> None:
super().__init__()
if string_distance is not None:
self._string_distance = string_distance
else:
try:
from rapidfuzz import distance as rfd
except ImportError:
msg = (
"The default string_distance operator for the "
" JsonEditDistanceEvaluator requires installation of "
"the rapidfuzz package. "
"Please install it with `pip install rapidfuzz`."
)
raise ImportError(msg)
self._string_distance = rfd.DamerauLevenshtein.normalized_distance
if canonicalize is not None:
self._canonicalize = canonicalize
else:
self._canonicalize = lambda x: json.dumps(
x,
separators=(",", ":"),
sort_keys=True, # eliminate whitespace
)
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "json_edit_distance"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
return node
def _evaluate_strings(
self,
prediction: str,
input: Optional[str] = None,
reference: Optional[str] = None,
**kwargs: Any,
) -> dict:
parsed = self._canonicalize(self._parse_json(prediction))
label = self._canonicalize(self._parse_json(reference))
distance = self._string_distance(parsed, label)
return {"score": distance}
|
import json
from typing import Any, Callable, Optional, Union
from langchain_core.utils.json import parse_json_markdown
from langchain.evaluation.schema import StringEvaluator
class JsonEditDistanceEvaluator(StringEvaluator):
"""
An evaluator that calculates the edit distance between JSON strings.
This evaluator computes a normalized Damerau-Levenshtein distance between two JSON strings
after parsing them and converting them to a canonical format (i.e., whitespace and key order are normalized).
It can be customized with alternative distance and canonicalization functions.
Args:
string_distance (Optional[Callable[[str, str], float]]): A callable that computes the distance between two strings.
If not provided, a Damerau-Levenshtein distance from the `rapidfuzz` package will be used.
canonicalize (Optional[Callable[[Any], Any]]): A callable that converts a parsed JSON object into its canonical string form.
If not provided, the default behavior is to serialize the JSON with sorted keys and no extra whitespace.
**kwargs (Any): Additional keyword arguments.
Attributes:
_string_distance (Callable[[str, str], float]): The internal distance computation function.
_canonicalize (Callable[[Any], Any]): The internal canonicalization function.
Examples:
>>> evaluator = JsonEditDistanceEvaluator()
>>> result = evaluator.evaluate_strings(prediction='{"a": 1, "b": 2}', reference='{"a": 1, "b": 3}')
>>> assert result["score"] is not None
Raises:
ImportError: If `rapidfuzz` is not installed and no alternative `string_distance` function is provided.
""" # noqa: E501
def __init__(
self,
string_distance: Optional[Callable[[str, str], float]] = None,
canonicalize: Optional[Callable[[Any], Any]] = None,
**kwargs: Any,
) -> None:
super().__init__()
if string_distance is not None:
self._string_distance = string_distance
else:
try:
from rapidfuzz import distance as rfd
except ImportError:
raise ImportError(
"The default string_distance operator for the "
" JsonEditDistanceEvaluator requires installation of "
"the rapidfuzz package. "
"Please install it with `pip install rapidfuzz`."
)
self._string_distance = rfd.DamerauLevenshtein.normalized_distance
if canonicalize is not None:
self._canonicalize = canonicalize
else:
self._canonicalize = lambda x: json.dumps(
x,
separators=(",", ":"),
sort_keys=True, # eliminate whitespace
)
@property
def requires_input(self) -> bool:
return False
@property
def requires_reference(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
return "json_edit_distance"
def _parse_json(self, node: Any) -> Union[dict, list, None, float, bool, int, str]:
if isinstance(node, str):
return parse_json_markdown(node)
return node
def _evaluate_strings(
self,
prediction: str,
input: Optional[str] = None,
reference: Optional[str] = None,
**kwargs: Any,
) -> dict:
parsed = self._canonicalize(self._parse_json(prediction))
label = self._canonicalize(self._parse_json(reference))
distance = self._string_distance(parsed, label)
return {"score": distance}
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
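# Illustrative note (summarising mmcv's default optimizer constructor, not quoted
# from it): each parameter name is matched against the custom_keys above and the
# longest matching key provides its multipliers, e.g.
#   backbone.stages.0.blocks.1.norm.weight   -> lr_mult=0.1, decay_mult=0.0
#   panoptic_head.query_embed.weight         -> lr_mult=1.0, decay_mult=0.0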
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa
depths = [2, 2, 6, 2]
model = dict(
type='Mask2Former',
backbone=dict(
_delete_=True,
type='SwinTransformer',
embed_dims=96,
depths=depths,
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(0, 1, 2, 3),
with_cp=False,
convert_weights=True,
frozen_stages=-1,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(
type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
init_cfg=None)
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
type='AdamW',
lr=0.0001,
weight_decay=0.05,
eps=1e-8,
betas=(0.9, 0.999),
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
__version__ = '0.13.23'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
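# Hedged usage note: only the presence of the key is checked above, so any value
# enables rich tracebacks, e.g.
#
#   DA_RICH_HANDLER=1 python my_script.py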
|
__version__ = '0.13.22'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import Hook
from mmdet.registry import HOOKS
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in bytes
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
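# Hedged config sketch: in an mmdet config this hook is typically enabled through
# `custom_hooks` (the interval below is illustrative):
#
#   custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]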
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner.hooks import HOOKS, Hook
@HOOKS.register_module()
class MemoryProfilerHook(Hook):
"""Memory profiler hook recording memory information including virtual
memory, swap memory, and the memory of the current process.
Args:
interval (int): Checking interval (every k iterations).
Default: 50.
"""
def __init__(self, interval=50):
try:
from psutil import swap_memory, virtual_memory
self._swap_memory = swap_memory
self._virtual_memory = virtual_memory
except ImportError:
raise ImportError('psutil is not installed, please install it by: '
'pip install psutil')
try:
from memory_profiler import memory_usage
self._memory_usage = memory_usage
except ImportError:
raise ImportError(
'memory_profiler is not installed, please install it by: '
'pip install memory_profiler')
self.interval = interval
def after_iter(self, runner):
if self.every_n_iters(runner, self.interval):
# in bytes
virtual_memory = self._virtual_memory()
swap_memory = self._swap_memory()
# in MB
process_memory = self._memory_usage()[0]
factor = 1024 * 1024
runner.logger.info(
'Memory information '
'available_memory: '
f'{round(virtual_memory.available / factor)} MB, '
'used_memory: '
f'{round(virtual_memory.used / factor)} MB, '
f'memory_utilization: {virtual_memory.percent} %, '
'available_swap_memory: '
f'{round((swap_memory.total - swap_memory.used) / factor)}'
' MB, '
f'used_swap_memory: {round(swap_memory.used / factor)} MB, '
f'swap_memory_utilization: {swap_memory.percent} %, '
'current_process_memory: '
f'{round(process_memory)} MB')
|