input | output
---|---|
"""Utilities for working with HTML."""
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
".ico",
".png",
".jpg",
".jpeg",
".gif",
".svg",
".csv",
".bz2",
".zip",
".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)
def find_all_links(
raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
"""Extract all links from a raw HTML string.
Args:
raw_html: original HTML.
pattern: Regex to use for extracting links from raw HTML.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
def extract_sub_links(
raw_html: str,
url: str,
*,
base_url: Optional[str] = None,
pattern: Union[str, re.Pattern, None] = None,
prevent_outside: bool = True,
exclude_prefixes: Sequence[str] = (),
continue_on_failure: bool = False,
) -> list[str]:
"""Extract all links from a raw HTML string and convert into absolute paths.
Args:
raw_html: original HTML.
url: the url of the HTML.
base_url: the base URL to check for outside links against.
pattern: Regex to use for extracting links from raw HTML.
prevent_outside: If True, ignore external links which are not children
of the base URL.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
continue_on_failure: If True, continue if parsing a specific link raises an
exception. Otherwise, raise the exception.
Returns:
List[str]: sub links.
"""
base_url_to_use = base_url if base_url is not None else url
parsed_base_url = urlparse(base_url_to_use)
parsed_url = urlparse(url)
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
try:
parsed_link = urlparse(link)
# Some may be absolute links like https://to/path
if parsed_link.scheme in {"http", "https"}:
absolute_path = link
# Some may have omitted the protocol like //to/path
elif link.startswith("//"):
absolute_path = f"{parsed_url.scheme}:{link}"
else:
absolute_path = urljoin(url, parsed_link.path)
if parsed_link.query:
absolute_path += f"?{parsed_link.query}"
absolute_paths.add(absolute_path)
except Exception as e:
if continue_on_failure:
logger.warning(
"Unable to load link %s. Raised exception:\n\n%s", link, e
)
continue
raise
results = []
for path in absolute_paths:
if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
continue
if prevent_outside:
parsed_path = urlparse(path)
if parsed_base_url.netloc != parsed_path.netloc:
continue
# Will take care of verifying rest of path after netloc
# if it's more specific
if not path.startswith(base_url_to_use):
continue
results.append(path)
return results
|
"""Utilities for working with HTML."""
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
".css",
".js",
".ico",
".png",
".jpg",
".jpeg",
".gif",
".svg",
".csv",
".bz2",
".zip",
".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
"(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)
def find_all_links(
raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
"""Extract all links from a raw HTML string.
Args:
raw_html: original HTML.
pattern: Regex to use for extracting links from raw HTML.
Returns:
List[str]: all links
"""
pattern = pattern or DEFAULT_LINK_REGEX
return list(set(re.findall(pattern, raw_html)))
def extract_sub_links(
raw_html: str,
url: str,
*,
base_url: Optional[str] = None,
pattern: Union[str, re.Pattern, None] = None,
prevent_outside: bool = True,
exclude_prefixes: Sequence[str] = (),
continue_on_failure: bool = False,
) -> list[str]:
"""Extract all links from a raw HTML string and convert into absolute paths.
Args:
raw_html: original HTML.
url: the url of the HTML.
base_url: the base URL to check for outside links against.
pattern: Regex to use for extracting links from raw HTML.
prevent_outside: If True, ignore external links which are not children
of the base URL.
exclude_prefixes: Exclude any URLs that start with one of these prefixes.
continue_on_failure: If True, continue if parsing a specific link raises an
exception. Otherwise, raise the exception.
Returns:
List[str]: sub links.
"""
base_url_to_use = base_url if base_url is not None else url
parsed_base_url = urlparse(base_url_to_use)
parsed_url = urlparse(url)
all_links = find_all_links(raw_html, pattern=pattern)
absolute_paths = set()
for link in all_links:
try:
parsed_link = urlparse(link)
# Some may be absolute links like https://to/path
if parsed_link.scheme == "http" or parsed_link.scheme == "https":
absolute_path = link
# Some may have omitted the protocol like //to/path
elif link.startswith("//"):
absolute_path = f"{parsed_url.scheme}:{link}"
else:
absolute_path = urljoin(url, parsed_link.path)
if parsed_link.query:
absolute_path += f"?{parsed_link.query}"
absolute_paths.add(absolute_path)
except Exception as e:
if continue_on_failure:
logger.warning(
"Unable to load link %s. Raised exception:\n\n%s", link, e
)
continue
raise
results = []
for path in absolute_paths:
if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
continue
if prevent_outside:
parsed_path = urlparse(path)
if parsed_base_url.netloc != parsed_path.netloc:
continue
# Will take care of verifying rest of path after netloc
# if it's more specific
if not path.startswith(base_url_to_use):
continue
results.append(path)
return results
|
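Both halves of this pair define the same link-extraction helpers. A minimal usage sketch, assuming the module above is importable under a hypothetical name `html_utils` (module name and URLs are illustrative only):
from html_utils import extract_sub_links  # hypothetical module name for the code above

raw_html = '<a href="/docs/page">Docs</a> <a href="https://other.example/x">Out</a>'
links = extract_sub_links(
    raw_html,
    url="https://example.com/",
    prevent_outside=True,  # drop links whose netloc or path falls outside the base URL
)
print(links)  # ['https://example.com/docs/page'] -- the external link is filtered out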
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. For
example, if an image only has values `[0, 1]` out of the range `[0, 255]`,
auto contrast will change the `1` values to `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.ops.core import _saturate_cast
@keras_export("keras.layers.AutoContrast")
class AutoContrast(BaseImagePreprocessingLayer):
"""Performs the auto-contrast operation on an image.
Auto contrast stretches the values of an image across the entire available
`value_range`. This makes differences between pixels more obvious. For
example, if an image only has values `[0, 1]` out of the range `[0, 255]`,
auto contrast will change the `1` values to `255`.
This layer is active at both training and inference time.
Args:
value_range: Range of values the incoming images will have.
Represented as a two number tuple written `(low, high)`.
This is typically either `(0, 1)` or `(0, 255)` depending
on how your preprocessing pipeline is set up.
Defaults to `(0, 255)`.
"""
_USE_BASE_FACTOR = False
_VALUE_RANGE_VALIDATION_ERROR = (
"The `value_range` argument should be a list of two numbers. "
)
def __init__(
self,
value_range=(0, 255),
**kwargs,
):
super().__init__(**kwargs)
self._set_value_range(value_range)
def _set_value_range(self, value_range):
if not isinstance(value_range, (tuple, list)):
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
if len(value_range) != 2:
raise ValueError(
self._VALUE_RANGE_VALIDATION_ERROR
+ f"Received: value_range={value_range}"
)
self.value_range = sorted(value_range)
def transform_images(self, images, transformation=None, training=True):
original_images = images
images = self._transform_value_range(
images,
original_range=self.value_range,
target_range=(0, 255),
dtype=self.compute_dtype,
)
images = self.backend.cast(images, self.compute_dtype)
low = self.backend.numpy.min(images, axis=(1, 2), keepdims=True)
high = self.backend.numpy.max(images, axis=(1, 2), keepdims=True)
scale = 255.0 / (high - low)
offset = -low * scale
images = images * scale + offset
results = self.backend.numpy.clip(images, 0.0, 255.0)
results = self._transform_value_range(
results,
original_range=(0, 255),
target_range=self.value_range,
dtype=self.compute_dtype,
)
# don't process NaN channels
results = self.backend.numpy.where(
self.backend.numpy.isnan(results), original_images, results
)
if results.dtype == images.dtype:
return results
if backend.is_int_dtype(images.dtype):
results = self.backend.numpy.round(results)
return _saturate_cast(results, images.dtype, self.backend)
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"value_range": self.value_range})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
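A minimal usage sketch for the layer in this pair, assuming a Keras 3 installation that exports `keras.layers.AutoContrast` (the sample values are illustrative):
import numpy as np
import keras

# One 2x2 RGB image whose values occupy only a narrow slice of [0, 255].
images = np.array(
    [[[[10.0, 10.0, 10.0], [20.0, 20.0, 20.0]],
      [[30.0, 30.0, 30.0], [40.0, 40.0, 40.0]]]], dtype="float32"
)
layer = keras.layers.AutoContrast(value_range=(0, 255))
stretched = layer(images)
# Per image and channel, the minimum (10) maps to 0 and the maximum (40) to 255.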
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
@pytest.mark.parametrize(
'storage, config',
[
('memory', None),
('weaviate', {'n_dim': 3, 'distance': 'l2-squared'}),
('annlite', {'n_dim': 3, 'metric': 'Euclidean'}),
('qdrant', {'n_dim': 3, 'distance': 'euclidean'}),
('elasticsearch', {'n_dim': 3, 'distance': 'l2_norm'}),
('sqlite', dict()),
],
)
def test_del_subindex(storage, config):
n_dim = 3
subindex_configs = (
{'@c': dict()} if storage in ['sqlite', 'memory'] else {'@c': {'n_dim': 2}}
)
da = DocumentArray(
storage=storage,
config=config,
subindex_configs=subindex_configs,
)
with da:
da.extend(
[
Document(
id=str(i),
embedding=i * np.ones(n_dim),
chunks=[
Document(id=str(i) + '_0', embedding=np.array([i, i])),
Document(id=str(i) + '_1', embedding=np.array([i, i])),
],
)
for i in range(10)
]
)
del da['0']
assert len(da) == 9
assert len(da._subindices['@c']) == 18
del da[-2:]
assert len(da) == 7
assert len(da._subindices['@c']) == 14
def test_del_subindex_annlite_multimodal():
from docarray import dataclass
from docarray.typing import Text
@dataclass
class MMDoc:
my_text: Text
my_other_text: Text
n_dim = 3
da = DocumentArray(
storage='annlite',
config={'n_dim': n_dim, 'metric': 'Euclidean'},
subindex_configs={'@.[my_text, my_other_text]': {'n_dim': 2}},
)
num_docs = 10
docs_to_add = DocumentArray(
[
Document(MMDoc(my_text='hello', my_other_text='world'))
for _ in range(num_docs)
]
)
for i, d in enumerate(docs_to_add):
d.id = str(i)
d.embedding = i * np.ones(n_dim)
d.my_text.id = str(i) + '_0'
d.my_text.embedding = [i, i]
d.my_other_text.id = str(i) + '_1'
d.my_other_text.embedding = [i, i]
with da:
da.extend(docs_to_add)
del da['0']
assert len(da) == 9
assert len(da._subindices['@.[my_text, my_other_text]']) == 18
|
import pytest
from docarray import DocumentArray, Document
from docarray.array.weaviate import DocumentArrayWeaviate
import numpy as np
@pytest.fixture()
def docs():
return DocumentArray([Document(id=f'{i}') for i in range(1, 10)])
@pytest.mark.parametrize(
'to_delete',
[
0,
1,
4,
-1,
list(range(1, 4)),
[2, 4, 7, 1, 1],
slice(0, 2),
slice(2, 4),
slice(4, -1),
[True, True, False],
...,
],
)
def test_del_all(docs, to_delete):
doc_to_delete = docs[to_delete]
del docs[to_delete]
assert doc_to_delete not in docs
@pytest.mark.parametrize(
'to_delete, missing_id',
[
([True, False], ['1']),
([True, True, False], ['1', '2']),
([False, True], ['2']),
([False, False, True, True], ['3', '4']),
],
)
def test_del_boolean_mask(docs, to_delete, missing_id):
# assert each missing_id is present before deleting
for m_id in missing_id:
assert m_id in docs[:, 'id']
del docs[to_delete]
# assert each missing_id is NOT present AFTER deleting
for m_id in missing_id:
assert m_id not in docs[:, 'id']
@pytest.mark.parametrize(
['deleted_ids', 'expected_ids'],
[
(['1', '2', '3', '4'], ['5', '6', '7', '8', '9']),
(['2', '4', '7', '1'], ['3', '5', '6', '8', '9']),
],
)
def test_del_by_multiple_idx(docs, deleted_ids, expected_ids):
del docs[deleted_ids]
assert docs[:, 'id'] == expected_ids
@pytest.mark.parametrize(
'da_cls,config,persist',
[
(DocumentArrayWeaviate, {'n_dim': 10}, False),
(DocumentArrayWeaviate, {'name': 'Storage', 'n_dim': 10}, True),
],
)
def test_del_da_persist(da_cls, config, persist, docs, start_storage):
da = da_cls(docs, config=config)
del da
da2 = da_cls(config=config)
if persist:
assert len(da2) == len(docs)
else:
assert len(da2) == 0
def test_del_da_attribute():
da = DocumentArray(
[
Document(embedding=np.array([1, 2, 3]), text='d1'),
Document(embedding=np.array([1, 2, 3]), text='d2'),
]
)
q = DocumentArray(
[
Document(embedding=np.array([4, 5, 6]), text='q1'),
Document(embedding=np.array([2, 3, 4]), text='q1'),
]
)
da.match(q)
del da[...][:, 'embedding']
for d in da:
assert d.embedding is None
|
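The tests in this pair exercise `DocumentArray.__delitem__` with integer indices, id lists, slices, boolean masks, and the ellipsis. A minimal sketch of the boolean-mask case, assuming the same docarray version these tests target:
from docarray import Document, DocumentArray

docs = DocumentArray([Document(id=str(i)) for i in range(1, 4)])
del docs[[True, False, True]]  # True positions are deleted, as in test_del_boolean_mask
assert docs[:, 'id'] == ['2']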
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer("bert-base-uncased")
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
"""
This example runs a CNN after the word embedding lookup. The output of the CNN is then pooled,
for example with mean-pooling.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_cnn-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to vectors using BERT
word_embedding_model = models.Transformer("bert-base-uncased")
cnn = models.CNN(
in_word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
out_channels=256,
kernel_sizes=[1, 3, 5],
)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
cnn.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, cnn, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(evaluator)
|
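After training, the saved model can be reloaded for plain sentence encoding; a minimal sketch with a placeholder output path (the real path is timestamped by the script above):
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("output/training_stsbenchmark_cnn-<timestamp>")  # placeholder path
embeddings = model.encode(["A sentence to embed.", "Another sentence."])
print(embeddings.shape)  # (2, pooled output dimension of the CNN + pooling stack)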
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
# get_ann_info and _filter_imgs of XMLDataset use self.CLASSES;
# setting CLASSES to None here should trigger the assertion below
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)
|
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
# get_ann_info and _filter_imgs of XMLDataset use self.CLASSES;
# setting CLASSES to None here should trigger the assertion below
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` have been searched to be optimal.
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
weight_decay=0.1,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))
# `lr` and `weight_decay` have been searched to be optimal.
optimizer = dict(
_delete_=True,
type='AdamW',
lr=0.0001,
weight_decay=0.1,
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule: the VOC dataset is repeated 3 times in
# `_base_/datasets/voc0712.py`, so the actual number of epochs is 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# training schedule: the VOC dataset is repeated 3 times in
# `_base_/datasets/voc0712.py`, so the actual number of epochs is 4 * 3 = 12
max_epochs = 4
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[3],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .functional_test_impl import Functional64OnlyTestImpl, FunctionalTestImpl
@skipIfNoCuda
class FunctionalFloat32CUDATest(FunctionalTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda", 0)
@skipIfNoCuda
class FunctionalFloat64CUDATest(FunctionalTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda", 0)
@skipIfNoCuda
class FunctionalFloat64OnlyCUDATest(Functional64OnlyTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .functional_test_impl import FunctionalTestImpl
@skipIfNoCuda
class FunctionalFloat32CUDATest(FunctionalTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class FunctionalFloat64CUDATest(FunctionalTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
from PIL import Image
from .utils import verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""Stanford Cars Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
is split roughly 50-50 between train and test.
The original URL is https://ai.stanford.edu/~jkrause/cars/car_dataset.html, but it is broken.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (str or ``pathlib.Path``): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): This parameter exists for backward compatibility but it does not
download the dataset, since the original URL is not available anymore. The dataset
seems to be available on Kaggle so you can try to manually download it using
`these instructions <https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616>`_.
"""
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError(
"Dataset not found. Try to manually download following the instructions in "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616."
)
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
def download(self):
raise ValueError(
"The original URL is broken so the StanfordCars dataset is not available for automatic "
"download anymore. You can try to download it manually following "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616, "
"and set download=False to avoid this error."
)
|
import pathlib
from typing import Any, Callable, Optional, Tuple
from PIL import Image
from .utils import verify_str_arg
from .vision import VisionDataset
class StanfordCars(VisionDataset):
"""Stanford Cars Dataset
The Cars dataset contains 16,185 images of 196 classes of cars. The data is
split into 8,144 training images and 8,041 testing images, where each class
is split roughly 50-50 between train and test.
The original URL is https://ai.stanford.edu/~jkrause/cars/car_dataset.html, but it is broken.
.. note::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of dataset
split (string, optional): The dataset split, supports ``"train"`` (default) or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): This parameter exists for backward compatibility but it does not
download the dataset, since the original URL is not available anymore. The dataset
seems to be available on Kaggle so you can try to manually download it using
`these instructions <https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616>`_.
"""
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
try:
import scipy.io as sio
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: pip install scipy")
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "stanford_cars"
devkit = self._base_folder / "devkit"
if self._split == "train":
self._annotations_mat_path = devkit / "cars_train_annos.mat"
self._images_base_path = self._base_folder / "cars_train"
else:
self._annotations_mat_path = self._base_folder / "cars_test_annos_withlabels.mat"
self._images_base_path = self._base_folder / "cars_test"
if download:
self.download()
if not self._check_exists():
raise RuntimeError(
"Dataset not found. Try to manually download following the instructions in "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616."
)
self._samples = [
(
str(self._images_base_path / annotation["fname"]),
annotation["class"] - 1, # Original target mapping starts from 1, hence -1
)
for annotation in sio.loadmat(self._annotations_mat_path, squeeze_me=True)["annotations"]
]
self.classes = sio.loadmat(str(devkit / "cars_meta.mat"), squeeze_me=True)["class_names"].tolist()
self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
"""Returns pil_image and class_id for given index"""
image_path, target = self._samples[idx]
pil_image = Image.open(image_path).convert("RGB")
if self.transform is not None:
pil_image = self.transform(pil_image)
if self.target_transform is not None:
target = self.target_transform(target)
return pil_image, target
def _check_exists(self) -> bool:
if not (self._base_folder / "devkit").is_dir():
return False
return self._annotations_mat_path.exists() and self._images_base_path.is_dir()
def download(self):
raise ValueError(
"The original URL is broken so the StanfordCars dataset is not available for automatic "
"download anymore. You can try to download it manually following "
"https://github.com/pytorch/vision/issues/7545#issuecomment-1631441616, "
"and set download=False to avoid this error."
)
|
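A minimal usage sketch for the dataset in this pair, assuming the Stanford Cars files have already been downloaded manually and unpacked under `data/stanford_cars` as the class expects:
from torchvision import transforms
from torchvision.datasets import StanfordCars

dataset = StanfordCars(
    root="data",                      # expects data/stanford_cars/{devkit, cars_train, ...}
    split="train",
    transform=transforms.ToTensor(),  # PIL image -> float tensor in [0, 1]
)
image, class_id = dataset[0]
print(len(dataset), image.shape, class_id)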
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.core.utils.typing import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
featmap_strides (list[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
featmap_strides (List[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
|
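The `roi_rescale` method in this pair is plain center-preserving box scaling; a small standalone check of that arithmetic (plain torch, no mmdet required, values illustrative):
import torch

rois = torch.tensor([[0., 10., 20., 30., 40.]])  # [batch_idx, x1, y1, x2, y2]
scale_factor = 2.0
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
new_w = (rois[:, 3] - rois[:, 1]) * scale_factor
new_h = (rois[:, 4] - rois[:, 2]) * scale_factor
new_rois = torch.stack(
    (rois[:, 0], cx - new_w * 0.5, cy - new_h * 0.5, cx + new_w * 0.5, cy + new_h * 0.5),
    dim=-1,
)
print(new_rois)  # [[0., 0., 10., 40., 50.]] -- same center (20, 30), doubled width and height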
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.generator = self.backend.random.SeedGenerator()
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
@keras_export("keras.layers.RandomGrayscale")
class RandomGrayscale(BaseImagePreprocessingLayer):
"""Preprocessing layer for random conversion of RGB images to grayscale.
This layer randomly converts input images to grayscale with a specified
factor. When applied, it maintains the original number of channels
but sets all channels to the same grayscale value. This can be useful
for data augmentation and training models to be robust to color
variations.
The conversion preserves the perceived luminance of the original color
image using standard RGB to grayscale conversion coefficients. Images
that are not selected for conversion remain unchanged.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Args:
factor: Float between 0 and 1, specifying the factor of
converting each image to grayscale. Defaults to 0.5. A value of
1.0 means all images will be converted, while 0.0 means no images
will be converted.
data_format: String, one of `"channels_last"` (default) or
`"channels_first"`. The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format,
or `(..., channels, height, width)`, in `"channels_first"` format.
Output shape:
Same as input shape. The output maintains the same number of channels
as the input, even for grayscale-converted images where all channels
will have the same value.
"""
def __init__(self, factor=0.5, data_format=None, **kwargs):
super().__init__(**kwargs)
if factor < 0 or factor > 1:
raise ValueError(
"`factor` should be between 0 and 1. "
f"Received: factor={factor}"
)
self.factor = factor
self.data_format = backend.standardize_data_format(data_format)
self.random_generator = self.backend.random.SeedGenerator()
def get_random_transformation(self, images, training=True, seed=None):
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
random_values = self.backend.random.uniform(
shape=(self.backend.core.shape(images)[0],),
minval=0,
maxval=1,
seed=seed,
)
should_apply = self.backend.numpy.expand_dims(
random_values < self.factor, axis=[1, 2, 3]
)
return should_apply
def transform_images(self, images, transformations=None, **kwargs):
should_apply = (
transformations
if transformations is not None
else self.get_random_transformation(images)
)
grayscale_images = self.backend.image.rgb_to_grayscale(
images, data_format=self.data_format
)
return self.backend.numpy.where(should_apply, grayscale_images, images)
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs, **kwargs):
return inputs
def transform_bounding_boxes(self, bounding_boxes, **kwargs):
return bounding_boxes
def transform_labels(self, labels, transformations=None, **kwargs):
return labels
def transform_segmentation_masks(
self, segmentation_masks, transformations=None, **kwargs
):
return segmentation_masks
def get_config(self):
config = super().get_config()
config.update({"factor": self.factor})
return config
|
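A minimal usage sketch for the layer in this pair, again assuming a Keras 3 installation that exports `keras.layers.RandomGrayscale`:
import numpy as np
import keras

images = np.random.uniform(0, 255, size=(4, 8, 8, 3)).astype("float32")
layer = keras.layers.RandomGrayscale(factor=0.5)  # roughly half of the images are converted
out = layer(images)
print(out.shape)  # (4, 8, 8, 3) -- the channel count is preserved even for converted images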
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict
import pytest
import numpy as np
from jina import DocumentArray, Document
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)])
]
)
def test_preprocessing_reshape_correct(
content: np.ndarray,
out_shape: Tuple
):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert reshaped_content.shape == out_shape, f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r', ), pytest.lazy_fixture('docs_with_blobs')),
(('c', ), pytest.lazy_fixture('docs_with_chunk_blobs'))
]
)
def test_encode_image_returns_correct_length(traversal_paths: Tuple[str], docs: DocumentArray) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512, )
@pytest.mark.parametrize(
'model_name',
[
'resnet50',
'mobilenet_v3_large',
'googlenet'
]
)
def test_encodes_semantic_meaning(test_images: Dict[str, np.array], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512, )
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict
import pytest
import numpy as np
from jina import DocumentArray, Document
try:
from torch_encoder import ImageTorchEncoder
except:
from jinahub.image.encoder.torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)])
]
)
def test_preprocessing_reshape_correct(
content: np.ndarray,
out_shape: Tuple
):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert reshaped_content.shape == out_shape, f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r', ), pytest.lazy_fixture('docs_with_blobs')),
(('c', ), pytest.lazy_fixture('docs_with_chunk_blobs'))
]
)
def test_encode_image_returns_correct_length(traversal_paths: Tuple[str], docs: DocumentArray) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512, )
@pytest.mark.parametrize(
'model_name',
[
'resnet50',
'mobilenet_v3_large',
'googlenet'
]
)
def test_encodes_semantic_meaning(test_images: Dict[str, np.array], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512, )
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
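A minimal sketch of driving the executor directly, mirroring the calls made in the tests above (the import path and blob shape are illustrative; the second half of the pair shows the hub-style import):
import numpy as np
from jina import Document, DocumentArray

from jinahub.image.encoder.torch_encoder import ImageTorchEncoder  # one of the import paths used above

docs = DocumentArray([Document(blob=np.ones((224, 224, 3), dtype=np.uint8))])
encoder = ImageTorchEncoder()
encoder.encode(docs=docs, parameters={})
print(docs[0].embedding.shape)  # (512,), as asserted by the tests above for the default model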
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
_: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_kwargs: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42) # type: ignore[comparison-overlap]
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42} # type: ignore[comparison-overlap]
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42} # type: ignore[comparison-overlap]
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7} # type: ignore[comparison-overlap]
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7} # type: ignore[comparison-overlap]
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7) # type: ignore[comparison-overlap]
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
_: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_kwargs: Any
) -> Union[BaseModel, dict]:
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
assert prompt.messages[0].prompt.template_format == "mustache" # type: ignore[union-attr, union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalGeneralizedCrossEntropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import Circle
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_generalized_cross_entropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import circle
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import Circle
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import circle
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Query Sparsity: Active Dimensions: 63.0, Sparsity Ratio: 0.9979
Model Corpus Sparsity: Active Dimensions: 63.4, Sparsity Ratio: 0.9979
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Query Sparsity: Active Dimensions: 48.1, Sparsity Ratio: 0.9984
Model Corpus Sparsity: Active Dimensions: 125.4, Sparsity Ratio: 0.9959
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Query Sparsity: Active Dimensions: 55.5, Sparsity Ratio: 0.9982
Model Corpus Sparsity: Active Dimensions: 94.4, Sparsity Ratio: 0.9969
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
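# Optional sketch (added for illustration, not part of the original example): `results` is a
# plain dict of floats, so the per-dataset and aggregated metrics can be inspected directly.
# We only assume the metric name appears in each key; exact key names are whatever the
# evaluator emits.
for key, value in sorted(results.items()):
    if "ndcg@10" in key:
        print(f"{key}: {value:.4f}")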
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
datasets = ["QuoraRetrieval", "MSMARCO"]
evaluator = SparseNanoBEIREvaluator(
dataset_names=datasets,
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
results = evaluator(model)
"""
Evaluating NanoQuoraRetrieval
Information Retrieval Evaluation of the model on the NanoQuoraRetrieval dataset:
Queries: 50
Corpus: 5046
Score-Function: dot
Accuracy@1: 92.00%
Accuracy@3: 96.00%
Accuracy@5: 98.00%
Accuracy@10: 100.00%
Precision@1: 92.00%
Precision@3: 40.00%
Precision@5: 24.80%
Precision@10: 13.20%
Recall@1: 79.73%
Recall@3: 92.53%
Recall@5: 94.93%
Recall@10: 98.27%
MRR@10: 0.9439
NDCG@10: 0.9339
MAP@100: 0.9072
Model Sparsity Stats Query : Row Non-Zero Mean: 62.97999954223633, Row Sparsity Mean: 0.9979365468025208
Model Sparsity Stats Corpus : Row Non-Zero Mean: 63.39932632446289, Row Sparsity Mean: 0.9979228377342224
Information Retrieval Evaluation of the model on the NanoMSMARCO dataset:
Queries: 50
Corpus: 5043
Score-Function: dot
Accuracy@1: 48.00%
Accuracy@3: 74.00%
Accuracy@5: 76.00%
Accuracy@10: 88.00%
Precision@1: 48.00%
Precision@3: 24.67%
Precision@5: 15.20%
Precision@10: 8.80%
Recall@1: 48.00%
Recall@3: 74.00%
Recall@5: 76.00%
Recall@10: 88.00%
MRR@10: 0.6211
NDCG@10: 0.6838
MAP@100: 0.6277
Model Sparsity Stats Query : Row Non-Zero Mean: 48.08000183105469, Row Sparsity Mean: 0.9984247088432312
Model Sparsity Stats Corpus : Row Non-Zero Mean: 125.3604965209961, Row Sparsity Mean: 0.9958928227424622
Average Queries: 50.0
Average Corpus: 5044.5
Aggregated for Score Function: dot
Accuracy@1: 70.00%
Accuracy@3: 85.00%
Accuracy@5: 87.00%
Accuracy@10: 94.00%
Precision@1: 70.00%
Recall@1: 63.87%
Precision@3: 32.33%
Recall@3: 83.27%
Precision@5: 20.00%
Recall@5: 85.47%
Precision@10: 11.00%
Recall@10: 93.13%
MRR@10: 0.7825
NDCG@10: 0.8089
Model Sparsity Stats Query : Row Non-Zero Mean: 55.53000068664551, Row Sparsity Mean: 0.998180627822876
Model Sparsity Stats Corpus : Row Non-Zero Mean: 94.37991142272949, Row Sparsity Mean: 0.9969078302383423
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8089
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.31.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with parallel-executing plot generators, where the Ubuntu default of ulimit -n 1024 or the
    OS X El Capitan default of 256 is too low. The setting is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Fix fork error on macOS; this seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.30.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with parallel-executing plot generators, where the Ubuntu default of ulimit -n 1024 or the
    OS X El Capitan default of 256 is too low. The setting is temporary and expires with the
    Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
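        # Added sanity check (illustrative): the constraint in the comment above holds here,
        # since 2 heads * 8 dims per head = 16, which is divisible by 16.
        assert (init_dict["num_attention_heads"] * init_dict["attention_head_dim"]) % 16 == 0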
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 2,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogVideoXTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": None,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogVideoXTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 2
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
# Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings.
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"patch_size_t": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"use_rotary_positional_embeddings": True,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogVideoXTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset'
]
|
import time
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "binary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But usearch only supports "float32", "int8", and "binary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
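# Quick size sanity check (an added illustration, not part of the original script): with
# "binary" precision each embedding dimension is packed into a single bit, so the quantized
# corpus is roughly 32x smaller than the float32 embeddings.
print(f"float32 corpus embeddings: shape={full_corpus_embeddings.shape}, dtype={full_corpus_embeddings.dtype}")
print(f"quantized corpus embeddings: shape={corpus_embeddings.shape}, dtype={corpus_embeddings.dtype}")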
# 6. Initially, we don't have a usearch index yet, so we use semantic_search_usearch to create it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using usearch
results, search_time, corpus_index = semantic_search_usearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how usearch can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` usearch index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using usearch, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the usearch index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in usearch.
# 9. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 4. Choose a target precision for the corpus embeddings
corpus_precision = "binary"
# Valid options are: "float32", "uint8", "int8", "ubinary", and "binary"
# But usearch only supports "float32", "int8", and "binary"
# 5. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 6. Initially, we don't have a usearch index yet, so we use semantic_search_usearch to create it
corpus_index = None
while True:
# 7. Encode the queries using the full precision
start_time = time.time()
query_embeddings = model.encode(queries, normalize_embeddings=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 8. Perform semantic search using usearch
results, search_time, corpus_index = semantic_search_usearch(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=4,
exact=True,
output_index=True,
)
# This is a helper function to showcase how usearch can be used with quantized embeddings.
# You must either provide the `corpus_embeddings` or the `corpus_index` usearch index, but not both.
# In the first call we'll provide the `corpus_embeddings` and get the `corpus_index` back, which
# we'll use in the next call. In practice, the index is stored in RAM or saved to disk, and not
# recalculated for every query.
# This function will 1) quantize the query embeddings to the same precision as the corpus embeddings,
# 2) perform the semantic search using usearch, 3) rescore the results using the full precision embeddings,
# and 4) return the results and the search time (and perhaps the usearch index).
# `corpus_precision` must be the same as the precision used to quantize the corpus embeddings.
# It is used to convert the query embeddings to the same precision as the corpus embeddings.
# `top_k` determines how many results are returned for each query.
# `rescore_multiplier` is a parameter for the rescoring step. Rather than searching for the top_k results,
# we search for top_k * rescore_multiplier results and rescore the top_k results using the full precision embeddings.
# So, higher values of rescore_multiplier will give better results, but will be slower.
# `calibration_embeddings` is a set of embeddings used to calibrate the quantization of the query embeddings.
# This is important only if you are using uint8 or int8 precision. In practice, this is used to calculate
# the minimum and maximum values of each of the embedding dimensions, which are then used to determine the
# quantization thresholds.
# `rescore` determines whether to rescore the results using the full precision embeddings, if False & the
# corpus is quantized, the results will be very poor. `exact` determines whether to use the exact search
# or the approximate search method in usearch.
# 9. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
def test_preemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.preemphasis, (waveform, coeff))
def test_deemphasis(self):
waveform = torch.rand(3, 2, 100, device=self.device, dtype=self.dtype)
coeff = 0.9
self._assert_consistency(F.deemphasis, (waveform, coeff))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
|
from __future__ import annotations
import torch
from sentence_transformers.models.Module import Module
class SpladePooling(Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM).
2. Applies a sparse transformation using an activation function followed by log1p (i.e., log(1 + activation(MLM_logits))).
3. Applies a pooling strategy `max` or `sum` to produce sparse embeddings.
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): Pooling method across token dimensions.
Choices:
            - `sum`: Sum pooling (used in the original SPLADE, see https://arxiv.org/pdf/2107.05720).
            - `max`: Max pooling (used in SPLADEv2 and later models, see https://arxiv.org/pdf/2109.10086 or https://arxiv.org/pdf/2205.04733).
        activation_function (str): Activation function applied before the log1p transformation.
            Choices:
            - `relu`: ReLU activation (standard in all SPLADE models).
            - `log1p_relu`: log(1 + ReLU(x)) variant used in OpenSearch SPLADE models, see arxiv.org/pdf/2504.14839.
word_embedding_dimension (int, optional): Dimensionality of the output embeddings (if needed).
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
config_keys: list[str] = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key as MLM logits.
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
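# Minimal numeric sketch (added for illustration; not part of the module): apply the pooling
# to fake MLM logits of shape (batch=1, seq_len=2, vocab=4) and observe that the output is the
# max over the sequence dimension of log1p(relu(logits)).
if __name__ == "__main__":
    fake_logits = torch.tensor([[[1.0, -2.0, 0.5, 0.0], [3.0, 0.0, -1.0, 2.0]]])
    pooling = SpladePooling(pooling_strategy="max")
    sentence_embedding = pooling({"token_embeddings": fake_logits})["sentence_embedding"]
    print(sentence_embedding)  # ≈ tensor([[1.3863, 0.0000, 0.4055, 1.0986]]), shape (1, 4)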
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM).
2. Applies a sparse transformation using an activation function followed by log1p (i.e., log(1 + activation(MLM_logits))).
3. Applies a pooling strategy `max` or `sum` to produce sparse embeddings.
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): Pooling method across token dimensions.
Choices:
- `sum`: Sum pooling (used in the original SPLADE, see https://arxiv.org/pdf/2107.05720).
- `max`: Max pooling (used in SPLADEv2 and later models, see https://arxiv.org/pdf/2109.10086 or https://arxiv.org/pdf/2205.04733).
activation_function (str): Activation function applied before the log1p transformation.
Choices:
- `relu`: ReLU activation (standard in all SPLADE models).
- `log1p_relu`: log(1 + ReLU(x)) variant used in the OpenSearch SPLADE models, see https://arxiv.org/pdf/2504.14839.
word_embedding_dimension (int, optional): Dimensionality of the output embeddings. If None, it is inferred from the vocabulary size on the first forward pass.
"""
SPLADE_POOLING_MODES = ("sum", "max")
SPLADE_ACTIVATION = ["relu", "log1p_relu"]
def __init__(
self, pooling_strategy: str = "max", activation_function="relu", word_embedding_dimension: int = None
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.activation_function = activation_function
if activation_function not in self.SPLADE_ACTIVATION:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
self.config_keys = ["pooling_strategy", "activation_function", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features with 'token_embeddings' key as MLM logits.
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
if self.activation_function == "relu":
splade_scores = torch.log1p(torch.relu(mlm_logits))
elif self.activation_function == "log1p_relu":
splade_scores = torch.log1p(torch.log1p(torch.relu(mlm_logits)))
else:
raise ValueError("activation_function must be either 'relu' or 'log1p_relu'")
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
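# Minimal usage sketch (illustrative only) of the SpladePooling module defined above,
# run on random MLM logits. The vocabulary size (30522) and variable names are assumptions.
if __name__ == "__main__":
    pooling = SpladePooling(pooling_strategy="max", activation_function="relu")
    dummy_features = {"token_embeddings": torch.randn(2, 8, 30522)}  # (batch, seq, vocab)
    out = pooling(dummy_features)
    print(out["sentence_embedding"].shape)  # torch.Size([2, 30522])
    print(pooling.get_sentence_embedding_dimension())  # 30522, set on the first forward pass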
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of ATSS. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of ATSS. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
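# Illustrative sketch: how a config dict for this detector is typically assembled and
# then resolved through the registry. The backbone/neck/head sub-configs below are
# abbreviated placeholders (a complete mmdet config also needs anchor generators,
# losses, etc.), so only the dict construction is executed here.
if __name__ == "__main__":
    atss_cfg = dict(
        type='ATSS',
        backbone=dict(type='ResNet', depth=50, out_indices=(0, 1, 2, 3)),
        neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
        bbox_head=dict(type='ATSSHead', num_classes=80, in_channels=256),
    )
    # detector = MODELS.build(atss_cfg)  # would resolve to the ATSS class registered above
    print(atss_cfg['type'])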
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
|
from __future__ import annotations
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
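# Minimal usage sketch (illustrative only) of the public API re-exported above. The
# checkpoint name is an example and downloading it requires network access.
if __name__ == "__main__":
    model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
    embeddings = model.encode(["Sentence embeddings are useful.", "So are sparse ones."])
    print(embeddings.shape)  # (2, 384) for this checkpoint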
|
__version__ = "3.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = '0.30.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
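# Minimal usage sketch (illustrative only) of the API exported above: define a schema
# with BaseDoc and collect documents in a DocList. Field and class names are examples.
if __name__ == "__main__":
    class QuoteDoc(BaseDoc):
        text: str = ''

    quotes = DocList[QuoteDoc]([QuoteDoc(text='hello'), QuoteDoc(text='world')])
    print(len(quotes), quotes[0].text)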
|
__version__ = '0.21.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
from docarray.helper import login, logout
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .file_client import (BaseStorageBackend, FileClient, HardDiskBackend,
HTTPBackend, LmdbBackend, MemcachedBackend,
PetrelBackend)
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
from .io import dump, load, register_handler
from .parse import dict_from_file, list_from_file
__all__ = [
'BaseStorageBackend', 'FileClient', 'PetrelBackend', 'MemcachedBackend',
'LmdbBackend', 'HardDiskBackend', 'HTTPBackend', 'load', 'dump',
'register_handler', 'BaseFileHandler', 'JsonHandler', 'PickleHandler',
'YamlHandler', 'list_from_file', 'dict_from_file'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .file_client import BaseStorageBackend, FileClient
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
from .io import dump, load, register_handler
from .parse import dict_from_file, list_from_file
__all__ = [
'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler',
'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler',
'list_from_file', 'dict_from_file'
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_save(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
'dump_path': dump_path,
},
)
with f:
f.post(on='/save')
new_f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
},
)
with new_f:
result = new_f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert (
d.matches[0].scores['cosine'].value
<= d.matches[1].scores['cosine'].value
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import export_dump_streaming
from ...faiss_searcher import FaissSearcher
def _get_docs_from_vecs(queries):
docs = DocumentArray()
for q in queries:
doc = Document(embedding=q)
docs.append(doc)
return docs
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
metas['name'] = 'faiss_idx'
yield metas
del os.environ['TEST_WORKSPACE']
def test_save(metas, tmpdir):
vec_idx = np.random.randint(0, high=512, size=[512]).astype(str)
vec = np.array(np.random.random([512, 10]), dtype=np.float32)
query = np.array(np.random.random([10, 10]), dtype=np.float32)
query_docs = _get_docs_from_vecs(query)
export_dump_streaming(
os.path.join(tmpdir, 'dump'),
1,
len(vec_idx),
zip(vec_idx, vec, [b'' for _ in range(len(vec))]),
)
dump_path = os.path.join(tmpdir, 'dump')
f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
'dump_path': dump_path,
},
)
with f:
f.post(on='/save')
new_f = Flow().add(
uses=FaissSearcher,
timeout_ready=-1,
uses_with={
'index_key': 'Flat',
},
)
with new_f:
result = new_f.post(
on='/search', data=query_docs, return_results=True, parameters={'top_k': 4}
)[0].docs
assert len(result[0].matches) == 4
for d in result:
assert d.matches[0].scores['l2'].value >= d.matches[1].scores['l2'].value
|
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(any_url=str(self))
|
from pydantic import AnyUrl as BaseAnyUrl
from docarray.document.base_node import BaseNode
from docarray.proto import NodeProto
class AnyUrl(BaseAnyUrl, BaseNode):
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should
be called when the Document is nested into another Document that needs to
be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(text=str(self))
|
__version__ = '0.13.13'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.12'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode
try:
import pandas as pd
import pyarrow as pa
import pyarrow.csv as pc
except ImportError:
pass
pytestmark = pytest.mark.skipif(
tm.no_arrow()["condition"] or tm.no_pandas()["condition"],
reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"],
)
dpath = "demo/data/"
class TestArrowTable:
def test_arrow_table(self):
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table)
assert dm.num_row() == 2
assert dm.num_col() == 4
def test_arrow_table_with_label(self):
df = pd.DataFrame([[1, 2.0, 3.0], [2, 3.0, 4.0]], columns=["a", "b", "c"])
table = pa.Table.from_pandas(df)
label = np.array([0, 1])
dm = xgb.DMatrix(table)
dm.set_label(label)
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([0, 1]))
def test_arrow_table_from_np(self):
coldata = np.array(
[[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]]
)
cols = list(map(pa.array, coldata))
table = pa.Table.from_arrays(cols, ["a", "b", "c"])
dm = xgb.DMatrix(table)
assert dm.num_row() == 4
assert dm.num_col() == 3
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_arrow_train(self, DMatrixT):
import pandas as pd
rows = 100
X = pd.DataFrame(
{
"A": np.random.randint(0, 10, size=rows),
"B": np.random.randn(rows),
"C": np.random.permutation([1, 0] * (rows // 2)),
}
)
y = pd.Series(np.random.randn(rows))
table = pa.Table.from_pandas(X)
dtrain1 = DMatrixT(table)
dtrain1.set_label(pa.Table.from_pandas(pd.DataFrame(y)))
bst1 = xgb.train({}, dtrain1, num_boost_round=10)
preds1 = bst1.predict(DMatrixT(X))
dtrain2 = DMatrixT(X, y)
bst2 = xgb.train({}, dtrain2, num_boost_round=10)
preds2 = bst2.predict(DMatrixT(X))
np.testing.assert_allclose(preds1, preds2)
preds3 = bst2.inplace_predict(table)
np.testing.assert_allclose(preds1, preds3)
assert bst2.feature_names == ["A", "B", "C"]
assert bst2.feature_types == ["int", "float", "int"]
def test_arrow_survival(self):
data = os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")
table = pc.read_csv(data)
y_lower_bound = table["Survival_label_lower_bound"]
y_upper_bound = table["Survival_label_upper_bound"]
X = table.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"])
dtrain = xgb.DMatrix(
X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
y_np_up = dtrain.get_float_info("label_upper_bound")
y_np_low = dtrain.get_float_info("label_lower_bound")
np.testing.assert_equal(y_np_up, y_upper_bound.to_pandas().values)
np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)
@pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
class TestArrowTableColumnSplit:
def test_arrow_table(self):
def verify_arrow_table():
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table, data_split_mode=DataSplitMode.COL)
assert dm.num_row() == 2
assert dm.num_col() == 4 * xgb.collective.get_world_size()
tm.run_with_rabit(world_size=3, test_fn=verify_arrow_table)
|
import os
import sys
import unittest
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode
try:
import pandas as pd
import pyarrow as pa
import pyarrow.csv as pc
except ImportError:
pass
pytestmark = pytest.mark.skipif(
tm.no_arrow()["condition"] or tm.no_pandas()["condition"],
reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"],
)
dpath = "demo/data/"
class TestArrowTable:
def test_arrow_table(self):
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table)
assert dm.num_row() == 2
assert dm.num_col() == 4
def test_arrow_table_with_label(self):
df = pd.DataFrame([[1, 2.0, 3.0], [2, 3.0, 4.0]], columns=["a", "b", "c"])
table = pa.Table.from_pandas(df)
label = np.array([0, 1])
dm = xgb.DMatrix(table)
dm.set_label(label)
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([0, 1]))
def test_arrow_table_from_np(self):
coldata = np.array(
[[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]]
)
cols = list(map(pa.array, coldata))
table = pa.Table.from_arrays(cols, ["a", "b", "c"])
dm = xgb.DMatrix(table)
assert dm.num_row() == 4
assert dm.num_col() == 3
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_arrow_train(self, DMatrixT):
import pandas as pd
rows = 100
X = pd.DataFrame(
{
"A": np.random.randint(0, 10, size=rows),
"B": np.random.randn(rows),
"C": np.random.permutation([1, 0] * (rows // 2)),
}
)
y = pd.Series(np.random.randn(rows))
table = pa.Table.from_pandas(X)
dtrain1 = DMatrixT(table)
dtrain1.set_label(pa.Table.from_pandas(pd.DataFrame(y)))
bst1 = xgb.train({}, dtrain1, num_boost_round=10)
preds1 = bst1.predict(DMatrixT(X))
dtrain2 = DMatrixT(X, y)
bst2 = xgb.train({}, dtrain2, num_boost_round=10)
preds2 = bst2.predict(DMatrixT(X))
np.testing.assert_allclose(preds1, preds2)
preds3 = bst2.inplace_predict(table)
np.testing.assert_allclose(preds1, preds3)
assert bst2.feature_names == ["A", "B", "C"]
assert bst2.feature_types == ["int", "float", "int"]
def test_arrow_survival(self):
data = os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")
table = pc.read_csv(data)
y_lower_bound = table["Survival_label_lower_bound"]
y_upper_bound = table["Survival_label_upper_bound"]
X = table.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"])
dtrain = xgb.DMatrix(
X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
y_np_up = dtrain.get_float_info("label_upper_bound")
y_np_low = dtrain.get_float_info("label_lower_bound")
np.testing.assert_equal(y_np_up, y_upper_bound.to_pandas().values)
np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)
class TestArrowTableColumnSplit:
def test_arrow_table(self):
def verify_arrow_table():
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table, data_split_mode=DataSplitMode.COL)
assert dm.num_row() == 2
assert dm.num_col() == 4 * xgb.collective.get_world_size()
tm.run_with_rabit(world_size=3, test_fn=verify_arrow_table)
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
condition = results["flagged"] if self.openai_pre_1_0 else results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
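# Minimal usage sketch (illustrative only) of the chain defined above. It assumes the
# ``openai`` package is installed and OPENAI_API_KEY is set; the sample text is made up.
if __name__ == "__main__":
    moderation = OpenAIModerationChain()
    result = moderation.invoke({"input": "Some text to check."})
    print(result["output"])  # the original text, or the policy-violation message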
|
"""Pass input through a moderation endpoint."""
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.utils import check_package_version, get_from_dict_or_env
from pydantic import Field, model_validator
from langchain.chains.base import Chain
class OpenAIModerationChain(Chain):
"""Pass input through a moderation endpoint.
To use, you should have the ``openai`` python package installed, and the
environment variable ``OPENAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.chains import OpenAIModerationChain
moderation = OpenAIModerationChain()
"""
client: Any = None #: :meta private:
async_client: Any = None #: :meta private:
model_name: Optional[str] = None
"""Moderation model name to use."""
error: bool = False
"""Whether or not to error if bad content was found."""
input_key: str = "input" #: :meta private:
output_key: str = "output" #: :meta private:
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
openai_pre_1_0: bool = Field(default=False)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
openai_api_key = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
openai_organization = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
openai.api_key = openai_api_key
if openai_organization:
openai.organization = openai_organization
values["openai_pre_1_0"] = False
try:
check_package_version("openai", gte_version="1.0")
except ValueError:
values["openai_pre_1_0"] = True
if values["openai_pre_1_0"]:
values["client"] = openai.Moderation # type: ignore[attr-defined]
else:
values["client"] = openai.OpenAI(api_key=openai_api_key)
values["async_client"] = openai.AsyncOpenAI(api_key=openai_api_key)
except ImportError:
msg = (
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
raise ImportError(msg)
return values
@property
def input_keys(self) -> list[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> list[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _moderate(self, text: str, results: Any) -> str:
if self.openai_pre_1_0:
condition = results["flagged"]
else:
condition = results.flagged
if condition:
error_str = "Text was found that violates OpenAI's content policy."
if self.error:
raise ValueError(error_str)
else:
return error_str
return text
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, Any]:
text = inputs[self.input_key]
if self.openai_pre_1_0:
results = self.client.create(text)
output = self._moderate(text, results["results"][0])
else:
results = self.client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> dict[str, Any]:
if self.openai_pre_1_0:
return await super()._acall(inputs, run_manager=run_manager)
text = inputs[self.input_key]
results = await self.async_client.moderations.create(input=text)
output = self._moderate(text, results.results[0])
return {self.output_key: output}
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-train-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-train-metas.pkl'))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
test_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='challenge2019/challenge-2019-validation-detection-bbox.txt',
label_file='challenge2019/cls-label-description.csv',
hierarchy_file='challenge2019/class_label_tree.np',
meta_file='challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file='challenge2019/challenge-2019-validation-'
'detection-human-imagelabels.csv'))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
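# Illustrative worked example (numbers assumed, the actual base learning rate lives in
# the inherited base config): `auto_scale_lr` applies the linear scaling rule
#   scaled_lr = base_lr * (real_total_batch_size / base_batch_size)
# e.g. base_lr = 0.02 on 8 GPUs x 2 samples per GPU: 0.02 * (16 / 64) = 0.005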
|
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
model = dict(
roi_head=dict(bbox_head=dict(num_classes=500)),
test_cfg=dict(rcnn=dict(score_thr=0.01)))
# dataset settings
dataset_type = 'OpenImagesChallengeDataset'
data_root = 'data/OpenImages/'
data = dict(
train=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-train-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np'),
val=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'),
test=dict(
type=dataset_type,
ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-bbox.txt',
img_prefix=data_root + 'OpenImages/',
label_file=data_root + 'challenge2019/cls-label-description.csv',
hierarchy_file=data_root + 'challenge2019/class_label_tree.np',
meta_file=data_root +
'challenge2019/challenge-2019-validation-metas.pkl',
image_level_ann_file=data_root +
'challenge2019/challenge-2019-validation-detection-'
'human-imagelabels.csv'))
evaluation = dict(interval=1, metric='mAP')
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
"""
Compute image embeddings
"""
from __future__ import annotations
import os
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../examples/sentence_transformer/applications/image-search/two_dogs_in_snow.jpg",
)
img_emb = model.encode(Image.open(image_filepath))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)[0]
assert abs(cos_scores[0] - 0.3069) < 0.01
assert abs(cos_scores[1] - 0.1010) < 0.01
assert abs(cos_scores[2] - 0.1086) < 0.01
|
"""
Compute image embeddings
"""
from __future__ import annotations
import os
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../examples/applications/image-search/two_dogs_in_snow.jpg",
)
img_emb = model.encode(Image.open(image_filepath))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)[0]
assert abs(cos_scores[0] - 0.3069) < 0.01
assert abs(cos_scores[1] - 0.1010) < 0.01
assert abs(cos_scores[2] - 0.1086) < 0.01
|
from datasets import load_dataset
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity
# Initialize model components
model_name = "sentence-transformers/all-mpnet-base-v2"
transformer = Transformer(model_name)
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=32, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# Example of using multiple similarity functions
multi_sim_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_multi_sim",
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
show_progress_bar=True,
)
multi_sim_results = multi_sim_evaluator(model)
# Print the results with multiple similarity functions
print(f"Primary metric with multiple similarity functions: {multi_sim_evaluator.primary_metric}")
print(f"Primary metric value: {multi_sim_results[multi_sim_evaluator.primary_metric]:.4f}")
# Print all metrics for comparison
print("\nComparison of similarity functions:")
for sim_fn in ["cosine", "dot", "euclidean", "manhattan"]:
print(f"\n{sim_fn.upper()} SIMILARITY:")
print(f" Accuracy: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_accuracy']:.4f}")
print(f" F1: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_f1']:.4f}")
print(f" Precision: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_precision']:.4f}")
print(f" Recall: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_recall']:.4f}")
print(f" AP: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_ap']:.4f}")
print(f" MCC: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_mcc']:.4f}")
|
from datasets import load_dataset
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity
# Initialize model components
model_name = "sentence-transformers/all-mpnet-base-v2"
transformer = Transformer(model_name)
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=32, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# Load a dataset with two text columns and a class label column
# Using the Quora Duplicates dataset as an example
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
)
results = binary_acc_evaluator(model)
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# Example of using multiple similarity functions
multi_sim_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_multi_sim",
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
show_progress_bar=True,
)
multi_sim_results = multi_sim_evaluator(model)
# Print the results with multiple similarity functions
print(f"Primary metric with multiple similarity functions: {multi_sim_evaluator.primary_metric}")
print(f"Primary metric value: {multi_sim_results[multi_sim_evaluator.primary_metric]:.4f}")
# Print all metrics for comparison
print("\nComparison of similarity functions:")
for sim_fn in ["cosine", "dot", "euclidean", "manhattan"]:
print(f"\n{sim_fn.upper()} SIMILARITY:")
print(f" Accuracy: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_accuracy']:.4f}")
print(f" F1: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_f1']:.4f}")
print(f" Precision: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_precision']:.4f}")
print(f" Recall: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_recall']:.4f}")
print(f" AP: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_ap']:.4f}")
print(f" MCC: {multi_sim_results[f'quora_duplicates_multi_sim_{sim_fn}_mcc']:.4f}")
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
def get_module_from_name(module, tensor_name: str) -> tuple[Any, str]:
if "." in tensor_name:
module_name, tensor_name = tensor_name.rsplit(".", 1)
module = module.get_submodule(module_name)
return module, tensor_name
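# Minimal usage sketch (illustrative only): split a dotted tensor name into its parent
# module and attribute name on a small torch model. The model and tensor name are made up.
if __name__ == "__main__":
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
    parent, attr = get_module_from_name(model, "0.weight")
    print(type(parent).__name__, attr)  # Linear weight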
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple
def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]:
if "." in tensor_name:
module_name, tensor_name = tensor_name.rsplit(".", 1)
module = module.get_submodule(module_name)
return module, tensor_name
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
import numpy as np
from .any_url import AnyUrl
class ImageUrl(AnyUrl):
def load(self) -> np.ndarray:
"""
Transform the URL into an image tensor.
This is just a patch; we will move the function from the old docarray.
:return: tensor image
"""
return np.zeros((3, 224, 224))
|
import numpy as np
from docarray.typing import Tensor
from .any_url import AnyUrl
class ImageUrl(AnyUrl):
def load(self) -> Tensor:
"""
Transform the URL into an image tensor.
This is just a patch; we will move the function from the old docarray.
:return: tensor image
"""
return np.zeros((3, 224, 224))
|
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'Importance score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, tree_idx=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, tree_idx=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, tree_idx=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, tree_idx=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
|
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'Importance score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, num_trees=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, num_trees=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, num_trees=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, num_trees=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
|
import argparse
import os
from typing import List, Union
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a,
(
_StoreAction,
_StoreTrueAction,
KVAppendAction,
CastToIntAction,
CastHostAction,
),
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
elif isinstance(a, CastHostAction):
ddd['type'] = str
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
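# A minimal usage sketch (not part of the original module): dump the exported CLI
# schema to stdout as JSON for inspection; assumes jina is importable locally.
if __name__ == '__main__':
    import json

    print(json.dumps(api_to_dict(show_all_args=True), indent=2, default=str))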
|
import argparse
import os
from typing import List, Union
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a, (_StoreAction, _StoreTrueAction, KVAppendAction, CastToIntAction)
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_flax_available,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["multicontrolnet"] = ["MultiControlNetModel"]
_import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"]
_import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"]
_import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"]
_import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"]
_import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"]
_import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"]
_import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"]
_import_structure["pipeline_controlnet_union_inpaint_sd_xl"] = ["StableDiffusionXLControlNetUnionInpaintPipeline"]
_import_structure["pipeline_controlnet_union_sd_xl"] = ["StableDiffusionXLControlNetUnionPipeline"]
_import_structure["pipeline_controlnet_union_sd_xl_img2img"] = ["StableDiffusionXLControlNetUnionImg2ImgPipeline"]
try:
if not (is_transformers_available() and is_flax_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_flax_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects))
else:
_import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline
from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline
from .pipeline_controlnet_union_inpaint_sd_xl import StableDiffusionXLControlNetUnionInpaintPipeline
from .pipeline_controlnet_union_sd_xl import StableDiffusionXLControlNetUnionPipeline
from .pipeline_controlnet_union_sd_xl_img2img import StableDiffusionXLControlNetUnionImg2ImgPipeline
try:
if not (is_transformers_available() and is_flax_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_flax_available,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["multicontrolnet"] = ["MultiControlNetModel"]
_import_structure["pipeline_controlnet"] = ["StableDiffusionControlNetPipeline"]
_import_structure["pipeline_controlnet_blip_diffusion"] = ["BlipDiffusionControlNetPipeline"]
_import_structure["pipeline_controlnet_img2img"] = ["StableDiffusionControlNetImg2ImgPipeline"]
_import_structure["pipeline_controlnet_inpaint"] = ["StableDiffusionControlNetInpaintPipeline"]
_import_structure["pipeline_controlnet_inpaint_sd_xl"] = ["StableDiffusionXLControlNetInpaintPipeline"]
_import_structure["pipeline_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPipeline"]
_import_structure["pipeline_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetImg2ImgPipeline"]
try:
if not (is_transformers_available() and is_flax_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_flax_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_flax_and_transformers_objects))
else:
_import_structure["pipeline_flax_controlnet"] = ["FlaxStableDiffusionControlNetPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_blip_diffusion import BlipDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
from .pipeline_controlnet_inpaint_sd_xl import StableDiffusionXLControlNetInpaintPipeline
from .pipeline_controlnet_sd_xl import StableDiffusionXLControlNetPipeline
from .pipeline_controlnet_sd_xl_img2img import StableDiffusionXLControlNetImg2ImgPipeline
try:
if not (is_transformers_available() and is_flax_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
import ast
from collections import defaultdict
# Function to perform topological sorting
def topological_sort(dependencies: dict) -> list[list[str]]:
"""Given the dependencies graph construct sorted list of list of modular files
    For example, the returned list of lists might be:
[
["../modular_llama.py", "../modular_gemma.py"], # level 0
["../modular_llama4.py", "../modular_gemma2.py"], # level 1
["../modular_glm4.py"], # level 2
]
which means llama and gemma do not depend on any other modular models, while llama4 and gemma2
depend on the models in the first list, and glm4 depends on the models in the second and (optionally) in the first list.
"""
# Nodes are the name of the models to convert (we only add those to the graph)
nodes = {node.rsplit("modular_", 1)[1].replace(".py", "") for node in dependencies.keys()}
# This will be a graph from models to convert, to models to convert that should be converted before (as they are a dependency)
graph = {}
name_mapping = {}
for node, deps in dependencies.items():
node_name = node.rsplit("modular_", 1)[1].replace(".py", "")
dep_names = {dep.split(".")[-2] for dep in deps}
dependencies = {dep for dep in dep_names if dep in nodes and dep != node_name}
graph[node_name] = dependencies
name_mapping[node_name] = node
sorting_list = []
while len(graph) > 0:
# Find the nodes with 0 out-degree
leaf_nodes = {node for node in graph if len(graph[node]) == 0}
# Add them to the list as next level
sorting_list.append([name_mapping[node] for node in leaf_nodes])
# Remove the leafs from the graph (and from the deps of other nodes)
graph = {node: deps - leaf_nodes for node, deps in graph.items() if node not in leaf_nodes}
return sorting_list
# Function to extract class and import info from a file
def extract_classes_and_imports(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
imports = set()
for node in ast.walk(tree):
if isinstance(node, (ast.Import, ast.ImportFrom)):
module = node.module if isinstance(node, ast.ImportFrom) else None
if module and (".modeling_" in module or "transformers.models" in module):
imports.add(module)
return imports
# Function to map dependencies between classes
def map_dependencies(py_files):
dependencies = defaultdict(set)
# First pass: Extract all classes and map to files
for file_path in py_files:
# dependencies[file_path].add(None)
class_to_file = extract_classes_and_imports(file_path)
for module in class_to_file:
dependencies[file_path].add(module)
return dependencies
def find_priority_list(py_files):
"""
Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular
models will be higher in the topological order.
Args:
py_files: List of paths to the modular files
Returns:
        A tuple with the ordered list of lists of files and their dependencies (dict)
For example, ordered_files might be:
[
["../modular_llama.py", "../modular_gemma.py"], # level 0
["../modular_llama4.py", "../modular_gemma2.py"], # level 1
["../modular_glm4.py"], # level 2
]
which means llama and gemma do not depend on any other modular models, while llama4 and gemma2
depend on the models in the first list, and glm4 depends on the models in the second and (optionally) in the first list.
"""
dependencies = map_dependencies(py_files)
ordered_files = topological_sort(dependencies)
return ordered_files, dependencies
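# A minimal usage sketch (not part of the original module; the glob pattern is an
# illustrative assumption): print the conversion order level by level, so modular
# files with no modular dependencies are listed first.
if __name__ == "__main__":
    import glob

    modular_files = glob.glob("src/transformers/models/*/modular_*.py")
    ordered_files, dependencies = find_priority_list(modular_files)
    for level, files in enumerate(ordered_files):
        print(f"level {level}: {files}")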
|
import ast
from collections import defaultdict
# Function to perform topological sorting
def topological_sort(dependencies: dict):
# Nodes are the name of the models to convert (we only add those to the graph)
nodes = {node.rsplit("modular_", 1)[1].replace(".py", "") for node in dependencies.keys()}
# This will be a graph from models to convert, to models to convert that should be converted before (as they are a dependency)
graph = {}
name_mapping = {}
for node, deps in dependencies.items():
node_name = node.rsplit("modular_", 1)[1].replace(".py", "")
dep_names = {dep.split(".")[-2] for dep in deps}
dependencies = {dep for dep in dep_names if dep in nodes and dep != node_name}
graph[node_name] = dependencies
name_mapping[node_name] = node
sorting_list = []
while len(graph) > 0:
# Find the nodes with 0 out-degree
leaf_nodes = {node for node in graph if len(graph[node]) == 0}
# Add them to the list
sorting_list += list(leaf_nodes)
# Remove the leafs from the graph (and from the deps of other nodes)
graph = {node: deps - leaf_nodes for node, deps in graph.items() if node not in leaf_nodes}
return [name_mapping[x] for x in sorting_list]
# Function to extract class and import info from a file
def extract_classes_and_imports(file_path):
with open(file_path, "r", encoding="utf-8") as file:
tree = ast.parse(file.read(), filename=file_path)
imports = set()
for node in ast.walk(tree):
if isinstance(node, (ast.Import, ast.ImportFrom)):
module = node.module if isinstance(node, ast.ImportFrom) else None
if module and (".modeling_" in module or "transformers.models" in module):
imports.add(module)
return imports
# Function to map dependencies between classes
def map_dependencies(py_files):
dependencies = defaultdict(set)
# First pass: Extract all classes and map to files
for file_path in py_files:
# dependencies[file_path].add(None)
class_to_file = extract_classes_and_imports(file_path)
for module in class_to_file:
dependencies[file_path].add(module)
return dependencies
def find_priority_list(py_files):
"""
Given a list of modular files, sorts them by topological order. Modular models that DON'T depend on other modular
models will be higher in the topological order.
Args:
py_files: List of paths to the modular files
Returns:
A tuple with the ordered files (list) and their dependencies (dict)
"""
dependencies = map_dependencies(py_files)
ordered_files = topological_sort(dependencies)
return ordered_files, dependencies
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This Python code automates the download and creation of the parallel sentence files.
"""
import os
from opustools import OpusRead
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"]  # Source languages that our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"]  # Target languages that our student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpus / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, f"{corpus}-{src_lang}-{trg_lang}.tsv.gz")
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
"""
OPUS (http://opus.nlpl.eu/) is a great collection of different parallel datasets for more than 400 languages.
On the website, you can download parallel datasets for many languages in different formats. I found that
the format "Bottom-left triangle: download plain text files (MOSES/GIZA++)" requires minimal
overhead for post-processing to get it into a suitable format for this library.
You can use the OPUS dataset to create multilingual sentence embeddings. This script contains code to download
OPUS datasets for the desired languages and to create training files in the right format.
1) First, you need to install OpusTools (https://github.com/Helsinki-NLP/OpusTools/tree/master/opustools_pkg):
pip install opustools
2) Once you have OpusTools installed, you can download data in the right format via:
mkdir parallel-sentences
opus_read -d [CORPUS] -s [SRC_LANG] -t [TRG_LANG] --write parallel-sentences/[FILENAME].tsv.gz -wm moses -dl opus -p raw
For example:
mkdir parallel-sentences
opus_read -d JW300 -s en -t de --write parallel-sentences/JW300-en-de.tsv.gz -wm moses -dl opus -p raw
This downloads the JW300 Corpus (http://opus.nlpl.eu/JW300.php) for English (en) and German (de) and writes the output to
parallel-sentences/JW300-en-de.tsv.gz
####################
This Python code automates the download and creation of the parallel sentence files.
"""
import os
from opustools import OpusRead
corpora = ["JW300"] # Corpora you want to use
source_languages = ["en"]  # Source languages that our teacher model is able to understand
target_languages = ["de", "es", "it", "fr", "ar", "tr"]  # Target languages that our student model should learn
output_folder = "parallel-sentences"
opus_download_folder = "./opus"
# Iterate over all corpus / source language / target language combinations and download the files
os.makedirs(output_folder, exist_ok=True)
for corpus in corpora:
for src_lang in source_languages:
for trg_lang in target_languages:
output_filename = os.path.join(output_folder, "{}-{}-{}.tsv.gz".format(corpus, src_lang, trg_lang))
if not os.path.exists(output_filename):
print("Create:", output_filename)
try:
read = OpusRead(
directory=corpus,
source=src_lang,
target=trg_lang,
write=[output_filename],
download_dir=opus_download_folder,
preprocess="raw",
write_mode="moses",
suppress_prompts=True,
)
read.printPairs()
except Exception:
print("An error occurred during the creation of", output_filename)
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
# `mean` and `to_rgb` should be the same with the `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='RandomResize', scale=[(320, 320), (416, 416)], keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
from collections import namedtuple
from typing import Any, Callable, Optional, TypeVar
from typing_extensions import NamedTuple
import torch.return_types
from torch.utils._pytree import PyTree, tree_flatten, TreeSpec
FlattenFuncSpec = Callable[[PyTree, TreeSpec], list]
FlattenFuncExactMatchSpec = Callable[[PyTree, TreeSpec], bool]
SUPPORTED_NODES: dict[type[Any], FlattenFuncSpec] = {}
SUPPORTED_NODES_EXACT_MATCH: dict[type[Any], Optional[FlattenFuncExactMatchSpec]] = {}
_T = TypeVar("_T")
_K = TypeVar("_K")
_V = TypeVar("_V")
def register_pytree_flatten_spec(
cls: type[Any],
flatten_fn_spec: FlattenFuncSpec,
flatten_fn_exact_match_spec: Optional[FlattenFuncExactMatchSpec] = None,
) -> None:
SUPPORTED_NODES[cls] = flatten_fn_spec
SUPPORTED_NODES_EXACT_MATCH[cls] = flatten_fn_exact_match_spec
def _deregister_pytree_flatten_spec(
cls: type[Any],
) -> None:
del SUPPORTED_NODES[cls]
del SUPPORTED_NODES_EXACT_MATCH[cls]
def tree_flatten_spec(
pytree: PyTree,
spec: TreeSpec,
) -> list[Any]:
if spec.is_leaf():
return [pytree]
# I guess these exist for BC, FC reasons.
# In general, we should be able to directly
# use pytree tree flattener to flatten them,
# as export serializes the pytree separately.
# Will remove it in follow up PR.
if spec.type in SUPPORTED_NODES:
flatten_fn_spec = SUPPORTED_NODES[spec.type]
child_pytrees = flatten_fn_spec(pytree, spec)
result = []
for child, child_spec in zip(child_pytrees, spec.children_specs):
flat = tree_flatten_spec(child, child_spec)
result += flat
return result
flat_result, real_spec = tree_flatten(pytree)
if spec != real_spec:
raise RuntimeError(
f"Real spec {real_spec} of object {pytree} is different from expected spec {spec}. "
f"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml"
)
return flat_result
def _dict_flatten_spec(d: dict[_K, _V], spec: TreeSpec) -> list[_V]:
return [d[k] for k in spec.context]
def _list_flatten_spec(d: list[_T], spec: TreeSpec) -> list[_T]:
return [d[i] for i in range(spec.num_children)]
def _tuple_flatten_spec(d: tuple[_T, ...], spec: TreeSpec) -> list[_T]:
return [d[i] for i in range(spec.num_children)]
def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> list[Any]:
return [d[i] for i in range(spec.num_children)]
def _dict_flatten_spec_exact_match(d: dict[_K, _V], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _list_flatten_spec_exact_match(d: list[_T], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _tuple_flatten_spec_exact_match(d: tuple[_T, ...], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _namedtuple_flatten_spec_exact_match(d: NamedTuple, spec: TreeSpec) -> bool:
return len(d) == spec.num_children
register_pytree_flatten_spec(dict, _dict_flatten_spec, _dict_flatten_spec_exact_match)
register_pytree_flatten_spec(list, _list_flatten_spec, _list_flatten_spec_exact_match)
register_pytree_flatten_spec(
tuple,
_tuple_flatten_spec,
_tuple_flatten_spec_exact_match,
)
for return_type in torch.return_types.all_return_types:
register_pytree_flatten_spec(
return_type,
_tuple_flatten_spec,
_tuple_flatten_spec_exact_match,
)
register_pytree_flatten_spec(
namedtuple, # type: ignore[arg-type]
_namedtuple_flatten_spec,
_namedtuple_flatten_spec_exact_match,
)
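# A minimal usage sketch (not part of the original module): flatten a dict against
# a spec recorded earlier; values come back in the spec's key order rather than the
# order of the new dict.
if __name__ == "__main__":
    _, _spec = tree_flatten({"a": 1, "b": 2})
    print(tree_flatten_spec({"b": 20, "a": 10}, _spec))  # [10, 20]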
|
from collections import namedtuple
from typing import Any, Callable, Optional, TypeVar
from typing_extensions import NamedTuple
import torch.return_types
from torch.utils._pytree import PyTree, tree_flatten, TreeSpec
FlattenFuncSpec = Callable[[PyTree, TreeSpec], list]
FlattenFuncExactMatchSpec = Callable[[PyTree, TreeSpec], bool]
SUPPORTED_NODES: dict[type[Any], FlattenFuncSpec] = {}
SUPPORTED_NODES_EXACT_MATCH: dict[type[Any], Optional[FlattenFuncExactMatchSpec]] = {}
_T = TypeVar("_T")
_K = TypeVar("_K")
_V = TypeVar("_V")
def register_pytree_flatten_spec(
cls: type[Any],
flatten_fn_spec: FlattenFuncSpec,
flatten_fn_exact_match_spec: Optional[FlattenFuncExactMatchSpec] = None,
) -> None:
SUPPORTED_NODES[cls] = flatten_fn_spec
SUPPORTED_NODES_EXACT_MATCH[cls] = flatten_fn_exact_match_spec
def _deregister_pytree_flatten_spec(
cls: type[Any],
) -> None:
del SUPPORTED_NODES[cls]
del SUPPORTED_NODES_EXACT_MATCH[cls]
def tree_flatten_spec(
pytree: PyTree,
spec: TreeSpec,
) -> list[Any]:
if spec.is_leaf():
return [pytree]
# I guess these exist for BC, FC reasons.
# In general, we should be able to directly
# use pytree tree flattener to flatten them,
    # as export serializes the pytree separately.
# Will remove it in follow up PR.
if spec.type in SUPPORTED_NODES:
flatten_fn_spec = SUPPORTED_NODES[spec.type]
child_pytrees = flatten_fn_spec(pytree, spec)
result = []
for child, child_spec in zip(child_pytrees, spec.children_specs):
flat = tree_flatten_spec(child, child_spec)
result += flat
return result
flat_result, real_spec = tree_flatten(pytree)
if spec != real_spec:
raise RuntimeError(
f"Real spec {real_spec} of object {pytree} is different from expected spec {spec}. "
f"Please file an issue at https://github.com/pytorch/pytorch/issues/new?template=bug-report.yml"
)
return flat_result
def _dict_flatten_spec(d: dict[_K, _V], spec: TreeSpec) -> list[_V]:
return [d[k] for k in spec.context]
def _list_flatten_spec(d: list[_T], spec: TreeSpec) -> list[_T]:
return [d[i] for i in range(spec.num_children)]
def _tuple_flatten_spec(d: tuple[_T, ...], spec: TreeSpec) -> list[_T]:
return [d[i] for i in range(spec.num_children)]
def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> list[Any]:
return [d[i] for i in range(spec.num_children)]
def _dict_flatten_spec_exact_match(d: dict[_K, _V], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _list_flatten_spec_exact_match(d: list[_T], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _tuple_flatten_spec_exact_match(d: tuple[_T, ...], spec: TreeSpec) -> bool:
return len(d) == spec.num_children
def _namedtuple_flatten_spec_exact_match(d: NamedTuple, spec: TreeSpec) -> bool:
return len(d) == spec.num_children
register_pytree_flatten_spec(dict, _dict_flatten_spec, _dict_flatten_spec_exact_match)
register_pytree_flatten_spec(list, _list_flatten_spec, _list_flatten_spec_exact_match)
register_pytree_flatten_spec(
tuple,
_tuple_flatten_spec,
_tuple_flatten_spec_exact_match,
)
for return_type in torch.return_types.all_return_types:
register_pytree_flatten_spec(
return_type,
_tuple_flatten_spec,
_tuple_flatten_spec_exact_match,
)
register_pytree_flatten_spec(
namedtuple, # type: ignore[arg-type]
_namedtuple_flatten_spec,
_namedtuple_flatten_spec_exact_match,
)
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""
LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}"
f"{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
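# A minimal usage sketch (not part of the original module): the default stringify
# rule renders a single argument bare and multiple arguments as a tuple, which is
# how task arguments are rendered after the tool name.
if __name__ == "__main__":
    print(_default_stringify_rule_for_arguments(["2 + 3"]))      # 2 + 3
    print(_default_stringify_rule_for_arguments(["add", 2, 3]))  # ('add', 2, 3)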
|
from typing import Any, Collection, List, Optional, Tuple, Union
from llama_index.core.tools.types import AsyncBaseTool
from pydantic import BaseModel
class LLMCompilerParseResult(BaseModel):
"""LLMCompiler parser result."""
thought: str
idx: int
tool_name: str
args: str
class JoinerOutput(BaseModel):
"""Joiner output."""
thought: str
answer: str
is_replan: bool = False
def _default_stringify_rule_for_arguments(args: Union[List, Tuple]) -> str:
if len(args) == 1:
return str(args[0])
else:
return str(tuple(args))
class LLMCompilerTask(BaseModel):
"""LLM Compiler Task.
Object taken from
https://github.com/SqueezeAILab/LLMCompiler/blob/main/src/llm_compiler/task_fetching_unit.py.
"""
idx: int
name: str
# tool: Callable
tool: AsyncBaseTool
args: Union[List, Tuple]
dependencies: Collection[int]
# TODO: look into this
# stringify_rule: Optional[Callable] = None
thought: Optional[str] = None
observation: Optional[str] = None
is_join: bool = False
class Config:
arbitrary_types_allowed = True
async def __call__(self) -> Any:
return await self.tool.acall(*self.args)
def get_thought_action_observation(
self,
include_action: bool = True,
include_thought: bool = True,
include_action_idx: bool = False,
) -> str:
thought_action_observation = ""
if self.thought and include_thought:
thought_action_observation = f"Thought: {self.thought}\n"
if include_action:
idx = f"{self.idx}. " if include_action_idx else ""
# if self.stringify_rule:
# # If the user has specified a custom stringify rule for the
# # function argument, use it
# thought_action_observation += f"{idx}{self.stringify_rule(self.args)}\n"
# else:
# Otherwise, we have a default stringify rule
thought_action_observation += (
f"{idx}{self.name}"
f"{_default_stringify_rule_for_arguments(self.args)}\n"
)
if self.observation is not None:
thought_action_observation += f"Observation: {self.observation}\n"
return thought_action_observation
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .mask_hungarian_assigner import MaskHungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner', 'MaskHungarianAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner'
]
|
"""Load agent."""
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import load_agent
from langchain.agents.types import AGENT_TO_CLASS
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
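# A minimal usage sketch (FakeListLLM and the echo tool are illustrative
# assumptions, not part of this module): build a zero-shot ReAct executor from a
# single tool, letting `agent` default to ZERO_SHOT_REACT_DESCRIPTION.
if __name__ == "__main__":
    from langchain_community.llms.fake import FakeListLLM
    from langchain_core.tools import Tool

    echo = Tool(name="echo", func=lambda text: text, description="Echo the input back.")
    llm = FakeListLLM(responses=["Final Answer: done"])
    executor = initialize_agent(tools=[echo], llm=llm, tags=["demo"])
    print(type(executor).__name__)  # AgentExecutor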
|
"""Load agent."""
from collections.abc import Sequence
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.agents.loading import AGENT_TO_CLASS, load_agent
@deprecated(
"0.1.0",
message=AGENT_DEPRECATION_WARNING,
removal="1.0",
)
def initialize_agent(
tools: Sequence[BaseTool],
llm: BaseLanguageModel,
agent: Optional[AgentType] = None,
callback_manager: Optional[BaseCallbackManager] = None,
agent_path: Optional[str] = None,
agent_kwargs: Optional[dict] = None,
*,
tags: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> AgentExecutor:
"""Load an agent executor given tools and LLM.
Args:
tools: List of tools this agent has access to.
llm: Language model to use as the agent.
agent: Agent type to use. If None and agent_path is also None, will default
to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
callback_manager: CallbackManager to use. Global callback manager is used if
not provided. Defaults to None.
agent_path: Path to serialized agent to use. If None and agent is also None,
will default to AgentType.ZERO_SHOT_REACT_DESCRIPTION. Defaults to None.
agent_kwargs: Additional keyword arguments to pass to the underlying agent.
Defaults to None.
tags: Tags to apply to the traced runs. Defaults to None.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If both `agent` and `agent_path` are specified.
ValueError: If `agent` is not a valid agent type.
ValueError: If both `agent` and `agent_path` are None.
"""
tags_ = list(tags) if tags else []
if agent is None and agent_path is None:
agent = AgentType.ZERO_SHOT_REACT_DESCRIPTION
if agent is not None and agent_path is not None:
raise ValueError(
"Both `agent` and `agent_path` are specified, "
"but at most only one should be."
)
if agent is not None:
if agent not in AGENT_TO_CLASS:
raise ValueError(
f"Got unknown agent type: {agent}. "
f"Valid types are: {AGENT_TO_CLASS.keys()}."
)
tags_.append(agent.value if isinstance(agent, AgentType) else agent)
agent_cls = AGENT_TO_CLASS[agent]
agent_kwargs = agent_kwargs or {}
agent_obj = agent_cls.from_llm_and_tools(
llm, tools, callback_manager=callback_manager, **agent_kwargs
)
elif agent_path is not None:
agent_obj = load_agent(
agent_path, llm=llm, tools=tools, callback_manager=callback_manager
)
try:
# TODO: Add tags from the serialized object directly.
tags_.append(agent_obj._agent_type)
except NotImplementedError:
pass
else:
raise ValueError(
"Somehow both `agent` and `agent_path` are None, this should never happen."
)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
tags=tags_,
**kwargs,
)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py'
# training schedule
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
# training schedule
max_epochs = 12
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.0001, by_epoch=False, begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .image_processing_nougat import *
from .image_processing_nougat_fast import *
from .processing_nougat import *
from .tokenization_nougat_fast import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .image_processing_nougat import *
from .processing_nougat import *
from .tokenization_nougat_fast import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for docs in self.streamer.stream_docs(
docs=DocumentArray([Document(text=text)]),
exec_endpoint='/debug',
):
doc = docs[0]
return {'text': doc.text, 'tags': doc.tags}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/debug',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=self.host, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1400,
save_strategy="steps",
save_steps=1400,
learning_rate=4e-5,
optim="adamw_torch",
run_name=run_name,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(f"sparse-embedding/{run_name}", private=True)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
import os
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import SparseMultipleNegativesRankingLoss, SpladeLoss
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize the SPLADE model
model_name = "sparse-embedding/splade-distilbert-base-uncased-init"
model = SparseEncoder(model_name)
model.eval()
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = SpladeLoss(
model=model,
main_loss=SparseMultipleNegativesRankingLoss(model=model, scale=20, similarity_fct=model.similarity),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus,
) # Weight for document loss
run_name = f"splade-distilbert-nq-fresh-lq{lambda_query}-lc{lambda_corpus}"
os.makedirs(f"runs/{run_name}", exist_ok=True)
dev_evaluator = SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], show_progress_bar=True, batch_size=16)
os.makedirs(f"runs/{run_name}/eval", exist_ok=True)
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=f"runs/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=12,
per_device_eval_batch_size=16,
bf16=True,
logging_steps=200,
eval_strategy="steps",
eval_steps=1400,
save_strategy="steps",
save_steps=1400,
learning_rate=4e-5,
optim="adamw_torch",
run_name=run_name,
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model, output_path=f"runs/{run_name}/eval", epoch=1)
# 8. Save the trained & evaluated model locally
os.makedirs(f"runs/{run_name}/final", exist_ok=True)
model.save_pretrained(f"runs/{run_name}/final")
model.push_to_hub(run_name, private=True)
if __name__ == "__main__":
main()
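# --- Hedged usage sketch (not part of the training script above) -------------
# Minimal example of how the checkpoint saved to runs/<run_name>/final could be
# reloaded and queried. It only relies on SparseEncoder.encode() and
# model.similarity(), mirroring the API used during training; the checkpoint
# path and the example texts are placeholders.
def _example_inference():
    encoder = SparseEncoder("runs/splade-distilbert-nq-fresh-lq5e-05-lc3e-05/final")  # placeholder path
    query_embeddings = encoder.encode(["who wrote the declaration of independence"])
    doc_embeddings = encoder.encode(
        ["The Declaration of Independence was primarily drafted by Thomas Jefferson."]
    )
    return encoder.similarity(query_embeddings, doc_embeddings)  # higher score = more relevant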
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class DropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dropout_basics(self):
self.run_layer_test(
layers.Dropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_dropout_rescaling(self):
inputs = np.ones((20, 500))
layer = layers.Dropout(0.5, seed=1337)
outputs = layer(inputs, training=True)
outputs = backend.convert_to_numpy(outputs)
self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
self.assertAllClose(np.max(outputs), 2.0)
def test_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=-0.5)
def test_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=1.5)
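# --- Illustrative sketch (plain NumPy, not Keras internals) -------------------
# Inverted dropout keeps units with probability (1 - rate) and rescales the
# survivors by 1 / (1 - rate), which is why test_dropout_rescaling above expects
# a mean near 1.0 and a max of exactly 2.0 for rate=0.5 on an all-ones input.
def _inverted_dropout_demo(rate=0.5, seed=1337):
    rng = np.random.default_rng(seed)
    inputs = np.ones((20, 500))
    keep_mask = rng.random(inputs.shape) >= rate    # True where the unit survives
    outputs = inputs * keep_mask / (1.0 - rate)     # survivors are scaled to 2.0
    return outputs.mean(), outputs.max()            # ~1.0 and 2.0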
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class DropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dropout_basics(self):
self.run_layer_test(
layers.Dropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_dropout_rescaling(self):
inputs = np.ones((20, 500))
layer = layers.Dropout(0.5, seed=1337)
outputs = layer(inputs, training=True)
outputs = backend.convert_to_numpy(outputs)
self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
self.assertAllClose(np.max(outputs), 2.0)
def test_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=-0.5)
def test_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=1.5)
|
import pytest
from whisper.tokenizer import get_tokenizer
@pytest.mark.parametrize("multilingual", [True, False])
def test_tokenizer(multilingual):
    tokenizer = get_tokenizer(multilingual=multilingual)
assert tokenizer.sot in tokenizer.sot_sequence
assert len(tokenizer.all_language_codes) == len(tokenizer.all_language_tokens)
assert all(c < tokenizer.timestamp_begin for c in tokenizer.all_language_tokens)
def test_multilingual_tokenizer():
gpt2_tokenizer = get_tokenizer(multilingual=False)
multilingual_tokenizer = get_tokenizer(multilingual=True)
text = "다람쥐 헌 쳇바퀴에 타고파"
gpt2_tokens = gpt2_tokenizer.encode(text)
multilingual_tokens = multilingual_tokenizer.encode(text)
assert gpt2_tokenizer.decode(gpt2_tokens) == text
assert multilingual_tokenizer.decode(multilingual_tokens) == text
assert len(gpt2_tokens) > len(multilingual_tokens)
def test_split_on_unicode():
multilingual_tokenizer = get_tokenizer(multilingual=True)
tokens = [8404, 871, 287, 6, 246, 526, 3210, 20378]
words, word_tokens = multilingual_tokenizer.split_tokens_on_unicode(tokens)
assert words == [" elle", " est", " l", "'", "\ufffd", "é", "rit", "oire"]
assert word_tokens == [[8404], [871], [287], [6], [246], [526], [3210], [20378]]
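# --- Conceptual sketch (an assumption, not whisper's actual implementation) ---
# One way to picture the behaviour tested above: tokens are accumulated until
# decoding them no longer produces the U+FFFD replacement character, i.e. until
# they form complete UTF-8 code points. The real tokenizer additionally handles
# text that genuinely contains U+FFFD (as in the expected words above), which
# this simplified version does not. `decode` is any callable mapping a list of
# token ids to a string.
def naive_split_tokens_on_unicode(tokens, decode):
    words, word_tokens, current = [], [], []
    for token in tokens:
        current.append(token)
        decoded = decode(current)
        if "\ufffd" not in decoded:        # decoding succeeded -> flush the group
            words.append(decoded)
            word_tokens.append(list(current))
            current = []
    return words, word_tokens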
|
from whisper.tokenizer import get_tokenizer
def test_tokenizer():
gpt2_tokenizer = get_tokenizer(multilingual=False)
multilingual_tokenizer = get_tokenizer(multilingual=True)
text = "다람쥐 헌 쳇바퀴에 타고파"
gpt2_tokens = gpt2_tokenizer.encode(text)
multilingual_tokens = multilingual_tokenizer.encode(text)
assert gpt2_tokenizer.decode(gpt2_tokens) == text
assert multilingual_tokenizer.decode(multilingual_tokens) == text
assert len(gpt2_tokens) > len(multilingual_tokens)
def test_split_on_unicode():
multilingual_tokenizer = get_tokenizer(multilingual=True)
tokens = [8404, 871, 287, 6, 246, 526, 3210, 20378]
words, word_tokens = multilingual_tokenizer.split_tokens_on_unicode(tokens)
assert words == [" elle", " est", " l", "'", "�", "é", "rit", "oire"]
assert word_tokens == [[8404], [871], [287], [6], [246], [526], [3210], [20378]]
|
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDoc):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocArray[MyDoc](
[
MyDoc(
id=i,
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.stack()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
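# --- Standalone sketch of the batch-count arithmetic behind the parametrization
# above: for 100 documents, batch_size=16 gives 7 batches (the last one partial)
# and batch_size=10 gives 10 full batches. Plain Python, no docarray required.
def expected_n_batches(n_items: int, batch_size: int) -> int:
    return -(-n_items // batch_size)  # ceiling division

assert expected_n_batches(100, 16) == 7
assert expected_n_batches(100, 10) == 10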
|
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.typing import NdArray
@pytest.mark.parametrize('shuffle', [False, True])
@pytest.mark.parametrize('stack', [False, True])
@pytest.mark.parametrize('batch_size,n_batches', [(16, 7), (10, 10)])
def test_batch(shuffle, stack, batch_size, n_batches):
class MyDoc(BaseDocument):
id: int
tensor: NdArray
t_shape = (32, 32)
da = DocumentArray[MyDoc](
[
MyDoc(
id=i,
tensor=np.zeros(t_shape),
)
for i in range(100)
]
)
if stack:
da = da.stack()
batches = list(da._batch(batch_size=batch_size, shuffle=shuffle))
assert len(batches) == n_batches
for i, batch in enumerate(batches):
if i < n_batches - 1:
assert len(batch) == batch_size
if stack:
assert batch.tensor.shape == (batch_size, *t_shape)
else:
assert len(batch) <= batch_size
non_shuffled_ids = [
i for i in range(i * batch_size, min((i + 1) * batch_size, len(da)))
]
if not shuffle:
assert batch.id == non_shuffled_ids
else:
assert not (batch.id == non_shuffled_ids)
|
import pytest
from docarray.utils._internal.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
metrics = TensorFlowCompBackend.Metrics
else:
metrics = None
@pytest.mark.tensorflow
def test_cosine_sim_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.cosine_sim(a, b).tensor.shape == (1,)
assert metrics.cosine_sim(a, b).tensor == metrics.cosine_sim(b, a).tensor
tf.experimental.numpy.allclose(metrics.cosine_sim(a, a).tensor, tf.ones(1))
a = TensorFlowTensor(tf.random.normal((10, 3)))
b = TensorFlowTensor(tf.random.normal((5, 3)))
assert metrics.cosine_sim(a, b).tensor.shape == (10, 5)
assert metrics.cosine_sim(b, a).tensor.shape == (5, 10)
    diag_dists = tf.linalg.diag_part(metrics.cosine_sim(b, b).tensor)  # self-comparisons
tf.experimental.numpy.allclose(diag_dists, tf.ones(5))
@pytest.mark.tensorflow
def test_euclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.euclidean_dist(a, b).tensor.shape == (1,)
assert metrics.euclidean_dist(a, b).tensor == metrics.euclidean_dist(b, a).tensor
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.zeros((1, 1)))
b = TensorFlowTensor(tf.ones((4, 1)))
assert metrics.euclidean_dist(a, b).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, metrics.euclidean_dist(b, a).tensor
)
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.constant([0.0, 2.0, 0.0]))
b = TensorFlowTensor(tf.constant([0.0, 0.0, 2.0]))
desired_output_singleton: tf.Tensor = tf.math.sqrt(
tf.constant([2.0**2.0 + 2.0**2.0])
)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
a = TensorFlowTensor(tf.constant([[0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]))
b = TensorFlowTensor(tf.constant([[0.0, 0.0, 2.0], [0.0, 2.0, 0.0]]))
desired_output_singleton = tf.constant([[2.828427, 0.0], [0.0, 2.828427]])
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
@pytest.mark.tensorflow
def test_sqeuclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.sqeuclidean_dist(a, b).tensor.shape == (1,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
a = TensorFlowTensor(tf.random.normal((1, 1)))
b = TensorFlowTensor(tf.random.normal((4, 1)))
assert metrics.sqeuclidean_dist(b, a).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
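# --- Reference formulas behind the backend metrics exercised above, sketched in
# plain NumPy so the expected values (e.g. sqrt(2**2 + 2**2) ~= 2.828427) can be
# checked without TensorFlow. This is an illustration, not the backend code.
import numpy as np

def _np_cosine_sim(x, y):
    return float(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y)))

def _np_euclidean_dist(x, y):
    return float(np.linalg.norm(np.asarray(x) - np.asarray(y)))

_a = np.array([0.0, 2.0, 0.0])
_b = np.array([0.0, 0.0, 2.0])
print(_np_cosine_sim(_a, _b))            # 0.0 for orthogonal vectors
print(_np_euclidean_dist(_a, _b))        # ~2.828427
print(_np_euclidean_dist(_a, _b) ** 2)   # squared euclidean distance, ~8.0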
|
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
metrics = TensorFlowCompBackend.Metrics
else:
metrics = None
@pytest.mark.tensorflow
def test_cosine_sim_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.cosine_sim(a, b).tensor.shape == (1,)
assert metrics.cosine_sim(a, b).tensor == metrics.cosine_sim(b, a).tensor
tf.experimental.numpy.allclose(metrics.cosine_sim(a, a).tensor, tf.ones(1))
a = TensorFlowTensor(tf.random.normal((10, 3)))
b = TensorFlowTensor(tf.random.normal((5, 3)))
assert metrics.cosine_sim(a, b).tensor.shape == (10, 5)
assert metrics.cosine_sim(b, a).tensor.shape == (5, 10)
    diag_dists = tf.linalg.diag_part(metrics.cosine_sim(b, b).tensor)  # self-comparisons
tf.experimental.numpy.allclose(diag_dists, tf.ones(5))
@pytest.mark.tensorflow
def test_euclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.euclidean_dist(a, b).tensor.shape == (1,)
assert metrics.euclidean_dist(a, b).tensor == metrics.euclidean_dist(b, a).tensor
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.zeros((1, 1)))
b = TensorFlowTensor(tf.ones((4, 1)))
assert metrics.euclidean_dist(a, b).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, metrics.euclidean_dist(b, a).tensor
)
tf.experimental.numpy.allclose(metrics.euclidean_dist(a, a).tensor, tf.zeros(1))
a = TensorFlowTensor(tf.constant([0.0, 2.0, 0.0]))
b = TensorFlowTensor(tf.constant([0.0, 0.0, 2.0]))
desired_output_singleton: tf.Tensor = tf.math.sqrt(
tf.constant([2.0**2.0 + 2.0**2.0])
)
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
a = TensorFlowTensor(tf.constant([[0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]))
b = TensorFlowTensor(tf.constant([[0.0, 0.0, 2.0], [0.0, 2.0, 0.0]]))
desired_output_singleton = tf.constant([[2.828427, 0.0], [0.0, 2.828427]])
tf.experimental.numpy.allclose(
metrics.euclidean_dist(a, b).tensor, desired_output_singleton
)
@pytest.mark.tensorflow
def test_sqeuclidean_dist_tf():
a = TensorFlowTensor(tf.random.normal((128,)))
b = TensorFlowTensor(tf.random.normal((128,)))
assert metrics.sqeuclidean_dist(a, b).tensor.shape == (1,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
a = TensorFlowTensor(tf.random.normal((1, 1)))
b = TensorFlowTensor(tf.random.normal((4, 1)))
assert metrics.sqeuclidean_dist(b, a).tensor.shape == (4,)
tf.experimental.numpy.allclose(
metrics.sqeuclidean_dist(a, b).tensor,
metrics.euclidean_dist(a, b).tensor ** 2,
)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
Document for handling audios.
The Audio Document can contain:
- an [`AudioUrl`][docarray.typing.url.AudioUrl] (`AudioDoc.url`)
- an [`AudioTensor`](../../../api_references/typing/tensor/audio) (`AudioDoc.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`AudioDoc.embedding`)
- an [`AudioBytes`][docarray.typing.bytes.AudioBytes] (`AudioDoc.bytes_`) object
- an integer representing the frame_rate (`AudioDoc.frame_rate`)
You can use this Document directly:
```python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can extend this Document:
```python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
)
audio.name = TextDoc(text='my first audio')
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
```
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
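# --- Hypothetical illustration of the coercion implemented in `validate` above:
# a plain string becomes the `url` field and an array-like value becomes the
# `tensor` field. The URL is only a placeholder and is never downloaded here.
def _validate_examples():
    doc_from_url = AudioDoc.validate('https://example.com/hello.wav')       # placeholder URL
    doc_from_tensor = AudioDoc.validate(np.zeros(16000, dtype=np.float32))  # 1 s of silence at 16 kHz
    return doc_from_url.url, doc_from_tensor.tensor.shape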
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
torch = import_library('torch', raise_error=False)
tf = import_library('tensorflow', raise_error=False)
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDoc):
"""
Document for handling audios.
The Audio Document can contain:
- an [`AudioUrl`][docarray.typing.url.AudioUrl] (`AudioDoc.url`)
- an [`AudioTensor`](../../../api_references/typing/tensor/audio) (`AudioDoc.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`AudioDoc.embedding`)
- an [`AudioBytes`][docarray.typing.bytes.AudioBytes] (`AudioDoc.bytes_`) object
- an integer representing the frame_rate (`AudioDoc.frame_rate`)
You can use this Document directly:
```python
from docarray.documents import AudioDoc
# use it directly
audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can extend this Document:
```python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(AudioDoc):
name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.name = TextDoc(text='my first audio')
audio.tensor, audio.frame_rate = audio.url.load()
# model = MyEmbeddingModel()
# audio.embedding = model(audio.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
audio: AudioDoc
text: TextDoc
mmdoc = MultiModalDoc(
audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes_.load()
```
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from __future__ import annotations
from .splade_callbacks import SchedulerType, SpladeLambdaSchedulerCallback
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
SchedulerType,
SpladeLambdaSchedulerCallback,
)
__all__ = ["SpladeLambdaSchedulerCallback", "SchedulerType"]
|
"""
Example of training with Dask on GPU
====================================
"""
import cupy as cp
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
# around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`, be careful that it can not
# be used for anything else other than training unless a reference is specified. See
# the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
rng = da.random.default_rng(1)
m = 100000
n = 100
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
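# --- Hedged sketch (not part of the demo above): when a validation set is used
# together with `DaskQuantileDMatrix`, the training matrix is passed through the
# `ref` argument so its quantile cuts are reused, as the comment in
# using_quantile_device_dmatrix notes. X_valid / y_valid are placeholders.
def train_with_validation(client: Client, X, y, X_valid, y_valid):
    dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
    dvalid = dxgb.DaskQuantileDMatrix(client, X_valid, y_valid, ref=dtrain)
    return dxgb.train(
        client,
        {"verbosity": 2, "tree_method": "hist", "device": "cuda"},
        dtrain,
        num_boost_round=4,
        evals=[(dvalid, "valid")],
    )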
|
"""
Example of training with Dask on GPU
====================================
"""
import cupy as cp
import dask_cudf
from dask import array as da
from dask import dataframe as dd
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def using_dask_matrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
# DaskDMatrix acts like normal DMatrix, works as a proxy for local DMatrix scatter
# around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This distributed version
# of train returns a dictionary containing the resulting booster and evaluation
# history obtained from evaluation metrics.
output = dxgb.train(
client,
{
"verbosity": 2,
"tree_method": "hist",
# Golden line for GPU training
"device": "cuda",
},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
def using_quantile_device_dmatrix(client: Client, X: da.Array, y: da.Array) -> da.Array:
"""`DaskQuantileDMatrix` is a data type specialized for `hist` tree methods for
reducing memory usage.
.. versionadded:: 1.2.0
"""
X = dask_cudf.from_dask_dataframe(dd.from_dask_array(X))
y = dask_cudf.from_dask_dataframe(dd.from_dask_array(y))
# `DaskQuantileDMatrix` is used instead of `DaskDMatrix`, be careful that it can not
# be used for anything else other than training unless a reference is specified. See
# the `ref` argument of `DaskQuantileDMatrix`.
dtrain = dxgb.DaskQuantileDMatrix(client, X, y)
output = dxgb.train(
client,
{"verbosity": 2, "tree_method": "hist", "device": "cuda"},
dtrain,
num_boost_round=4,
)
prediction = dxgb.predict(client, output, X)
return prediction
if __name__ == "__main__":
# `LocalCUDACluster` is used for assigning GPU to XGBoost processes. Here
# `n_workers` represents the number of GPUs since we use one GPU per worker process.
with LocalCUDACluster(n_workers=2, threads_per_worker=4) as cluster:
with Client(cluster) as client:
# generate some random data for demonstration
rng = da.random.default_rng(1)
m = 100000
n = 100
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
print("Using DaskQuantileDMatrix")
from_ddqdm = using_quantile_device_dmatrix(client, X, y)
print("Using DMatrix")
from_dmatrix = using_dask_matrix(client, X, y)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
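# --- Illustrative sketch of the `--cfg-options` mechanism ---------------------
# DictAction turns command-line `key=value` pairs into a flat dict with dotted
# keys, and `Config.merge_from_dict` merges them into the nested config. The
# tiny in-memory config below is made up for illustration; real configs come
# from `Config.fromfile(args.config)` as in main() above.
def _cfg_options_demo():
    cfg = Config(dict(optim_wrapper=dict(type='OptimWrapper')))
    cfg.merge_from_dict({
        'optim_wrapper.type': 'AmpOptimWrapper',    # same effect as `--amp`
        'optim_wrapper.loss_scale': 'dynamic',
    })
    return cfg.optim_wrapper.type                   # -> 'AmpOptimWrapper'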
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras import activations as activations
from keras import applications as applications
from keras import callbacks as callbacks
from keras import config as config
from keras import constraints as constraints
from keras import datasets as datasets
from keras import distribution as distribution
from keras import dtype_policies as dtype_policies
from keras import export as export
from keras import initializers as initializers
from keras import legacy as legacy
from keras import mixed_precision as mixed_precision
from keras import models as models
from keras import ops as ops
from keras import optimizers as optimizers
from keras import quantizers as quantizers
from keras import random as random
from keras import regularizers as regularizers
from keras import tree as tree
from keras import utils as utils
from keras import visualization as visualization
from keras import wrappers as wrappers
from keras._tf_keras.keras import backend as backend
from keras._tf_keras.keras import layers as layers
from keras._tf_keras.keras import losses as losses
from keras._tf_keras.keras import metrics as metrics
from keras._tf_keras.keras import preprocessing as preprocessing
from keras.src.backend import Variable as Variable
from keras.src.backend import device as device
from keras.src.backend import name_scope as name_scope
from keras.src.backend.common.keras_tensor import KerasTensor as KerasTensor
from keras.src.backend.common.remat import RematScope as RematScope
from keras.src.backend.common.remat import remat as remat
from keras.src.backend.common.stateless_scope import (
StatelessScope as StatelessScope,
)
from keras.src.backend.common.symbolic_scope import (
SymbolicScope as SymbolicScope,
)
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.losses.loss import Loss as Loss
from keras.src.metrics.metric import Metric as Metric
from keras.src.models.model import Model as Model
from keras.src.models.sequential import Sequential as Sequential
from keras.src.ops.function import Function as Function
from keras.src.ops.operation import Operation as Operation
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.regularizers.regularizers import Regularizer as Regularizer
from keras.src.version import __version__ as __version__
from keras.src.version import version as version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.remat import RematScope
from keras.src.backend.common.remat import remat
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Sequence, Union
import numpy as np
import torch
from .base_data_element import BaseDataElement
class PixelData(BaseDataElement):
"""Data structure for pixel-level annotations or predictions.
All data items in ``data_fields`` of ``PixelData`` meet the following
requirements:
- They all have 3 dimensions in orders of channel, height, and width.
- They should have the same height and width.
Examples:
>>> metainfo = dict(
... img_id=random.randint(0, 100),
... img_shape=(random.randint(400, 600), random.randint(400, 600)))
>>> image = np.random.randint(0, 255, (4, 20, 40))
>>> featmap = torch.randint(0, 255, (10, 20, 40))
>>> pixel_data = PixelData(metainfo=metainfo,
... image=image,
... featmap=featmap)
>>> print(pixel_data)
>>> (20, 40)
>>> # slice
>>> slice_data = pixel_data[10:20, 20:40]
>>> assert slice_data.shape == (10, 10)
>>> slice_data = pixel_data[10, 20]
>>> assert slice_data.shape == (1, 1)
>>> # set
>>> pixel_data.map3 = torch.randint(0, 255, (20, 40))
>>> assert tuple(pixel_data.map3.shape) == (1, 20, 40)
>>> with self.assertRaises(AssertionError):
... # The dimension must be 3 or 2
... pixel_data.map2 = torch.randint(0, 255, (1, 3, 20, 40))
"""
def __setattr__(self, name: str, value: Union[torch.Tensor, np.ndarray]):
"""Set attributes of ``PixelData``.
        If the value is 2-dimensional and its shape meets the requirements,
        its channel dimension will be expanded automatically.
Args:
name (str): The key to access the value, stored in `PixelData`.
value (Union[torch.Tensor, np.ndarray]): The value to store in.
The type of value must be `torch.Tensor` or `np.ndarray`,
and its shape must meet the requirements of `PixelData`.
"""
if name in ('_metainfo_fields', '_data_fields'):
if not hasattr(self, name):
super().__setattr__(name, value)
else:
raise AttributeError(
f'{name} has been used as a '
f'private attribute, which is immutable. ')
else:
assert isinstance(value, (torch.Tensor, np.ndarray)), \
                f'Can not set {type(value)}, only support' \
f' {(torch.Tensor, np.ndarray)}'
if self.shape:
assert tuple(value.shape[-2:]) == self.shape, (
f'the height and width of '
f'values {tuple(value.shape[-2:])} is '
f'not consistent with'
f' the length of this '
f':obj:`PixelData` '
f'{self.shape} ')
assert value.ndim in [
2, 3
], f'The dim of value must be 2 or 3, but got {value.ndim}'
if value.ndim == 2:
value = value[None]
warnings.warn(f'The shape of value will convert from '
f'{value.shape[-2:]} to {value.shape}')
super().__setattr__(name, value)
# TODO torch.Long/bool
def __getitem__(self, item: Sequence[Union[int, slice]]) -> 'PixelData':
"""
Args:
item (Sequence[Union[int, slice]]): get the corresponding values
according to item.
Returns:
obj:`PixelData`: Corresponding values.
"""
new_data = self.__class__(metainfo=self.metainfo)
if isinstance(item, tuple):
assert len(item) == 2, 'Only support slice height and width'
tmp_item: List[slice] = list()
for index, single_item in enumerate(item[::-1]):
if isinstance(single_item, int):
tmp_item.insert(
0, slice(single_item, None, self.shape[-index - 1]))
elif isinstance(single_item, slice):
tmp_item.insert(0, single_item)
else:
raise TypeError(
'The type of element in input must be int or slice, '
f'but got {type(single_item)}')
tmp_item.insert(0, slice(None, None, None))
item = tuple(tmp_item)
for k, v in self.items():
setattr(new_data, k, v[item])
else:
raise TypeError(
f'Unsupported type {type(item)} for slicing PixelData')
return new_data
@property
def shape(self):
"""The shape of pixel data."""
if len(self._data_fields) > 0:
return tuple(self.values()[0].shape[-2:])
else:
return None
# TODO padding, resize
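# --- Minimal sketch of the channel-expansion rule enforced in __setattr__ above:
# a 2D (H, W) value is promoted to (1, H, W) via value[None], so every stored
# field ends up with three dimensions (C, H, W). Plain torch, no PixelData needed.
def _expand_channel_demo():
    value = torch.randint(0, 255, (20, 40))   # 2D input
    if value.ndim == 2:
        value = value[None]                   # -> shape (1, 20, 40)
    return tuple(value.shape)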
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import List, Sequence, Union
import numpy as np
import torch
from .base_data_element import BaseDataElement
class PixelData(BaseDataElement):
"""Data structure for pixel-level annnotations or predictions.
All data items in ``data_fields`` of ``PixelData`` meet the following
requirements:
- They all have 3 dimensions in orders of channel, height, and width.
- They should have the same height and width.
Examples:
>>> metainfo = dict(
... img_id=random.randint(0, 100),
... img_shape=(random.randint(400, 600), random.randint(400, 600)))
>>> image = np.random.randint(0, 255, (4, 20, 40))
>>> featmap = torch.randint(0, 255, (10, 20, 40))
>>> pixel_data = PixelData(metainfo=metainfo,
... image=image,
... featmap=featmap)
>>> print(pixel_data)
>>> (20, 40)
>>> # slice
>>> slice_data = pixel_data[10:20, 20:40]
>>> assert slice_data.shape == (10, 10)
>>> slice_data = pixel_data[10, 20]
>>> assert slice_data.shape == (1, 1)
>>> # set
>>> pixel_data.map3 = torch.randint(0, 255, (20, 40))
>>> assert tuple(pixel_data.map3.shape) == (1, 20, 40)
>>> with self.assertRaises(AssertionError):
... # The dimension must be 3 or 2
... pixel_data.map2 = torch.randint(0, 255, (1, 3, 20, 40))
"""
def __setattr__(self, name: str, value: Union[torch.Tensor, np.ndarray]):
"""Set attributes of ``PixelData``.
        If the value is 2-dimensional and its shape meets the requirements,
        its channel dimension will be expanded automatically.
Args:
name (str): The key to access the value, stored in `PixelData`.
value (Union[torch.Tensor, np.ndarray]): The value to store in.
The type of value must be `torch.Tensor` or `np.ndarray`,
and its shape must meet the requirements of `PixelData`.
"""
if name in ('_metainfo_fields', '_data_fields'):
if not hasattr(self, name):
super().__setattr__(name, value)
else:
raise AttributeError(
f'{name} has been used as a '
f'private attribute, which is immutable. ')
else:
assert isinstance(value, (torch.Tensor, np.ndarray)), \
                f'Can not set {type(value)}, only support' \
f' {(torch.Tensor, np.ndarray)}'
if self.shape:
assert tuple(value.shape[-2:]) == self.shape, (
f'the height and width of '
f'values {tuple(value.shape[-2:])} is '
f'not consistent with'
f' the length of this '
f':obj:`PixelData` '
f'{self.shape} ')
assert value.ndim in [
2, 3
], f'The dim of value must be 2 or 3, but got {value.ndim}'
if value.ndim == 2:
value = value[None]
warnings.warn(f'The shape of value will convert from '
f'{value.shape[-2:]} to {value.shape}')
super().__setattr__(name, value)
# TODO torch.Long/bool
def __getitem__(self, item: Sequence[Union[int, slice]]) -> 'PixelData':
"""
Args:
item (Sequence[Union[int, slice]]): get the corresponding values
according to item.
Returns:
obj:`PixelData`: Corresponding values.
"""
new_data = self.__class__(metainfo=self.metainfo)
if isinstance(item, tuple):
assert len(item) == 2, 'Only support slice height and width'
tmp_item: List[slice] = list()
for index, single_item in enumerate(item[::-1]):
if isinstance(single_item, int):
tmp_item.insert(
0, slice(single_item, None, self.shape[-index - 1]))
elif isinstance(single_item, slice):
tmp_item.insert(0, single_item)
else:
raise TypeError(
'The type of element in input must be int or slice, '
f'but got {type(single_item)}')
tmp_item.insert(0, slice(None, None, None))
item = tuple(tmp_item)
for k, v in self.items():
setattr(new_data, k, v[item])
else:
raise TypeError(
f'Unsupported type {type(item)} for slicing PixelData')
return new_data
@property
def shape(self):
"""The shape of pixel data."""
if len(self._data_fields) > 0:
return tuple(self.values()[0].shape[-2:])
else:
return None
# TODO padding, resize
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from mmdet.utils import OptConfigType, OptMultiConfig
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
data_preprocessor=data_preprocessor)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Sequence
import numpy as np
import tensorflow as tf
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
    :class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
    potentially B x (Height x Width x Channel), into an ndarray of `B x D`,
    where `B` is the batch size and `D` is the output dimension.
    The :class:`ImageTFEncoder` wraps the models from
    `tensorflow.keras.applications
    <https://www.tensorflow.org/api_docs/python/tf/keras/applications>`_.
"""
def __init__(
self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
batch_size: int = 32,
traversal_paths: Sequence[str] = ['r'],
device: str = '/CPU:0',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
            ``Xception``, etc. A full list can be found at
            `tensorflow.keras.applications
            <https://www.tensorflow.org/api_docs/python/tf/keras/applications#functions>`_.
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
- `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param batch_size: size of each batch
:param traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param device: Device ('/CPU:0', '/GPU:0', '/GPU:X')
"""
super().__init__(*args, **kwargs)
if traversal_paths is None:
traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = batch_size
self.default_traversal_paths = traversal_paths
self.logger = JinaLogger(self.__class__.__name__)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if 'GPU' in device:
gpu_index = 0 if 'GPU:' not in device else int(device.split(':')[-1])
if len(gpus) < gpu_index + 1:
raise RuntimeError(f'Device {device} not found on your system!')
self.device = tf.device(device)
with self.device:
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet',
)
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
        Encode document content into an ndarray of `B x D`, where `B` is the
        batch size and `D` is the output dimension.
:param docs: DocumentArray containing blob as image data.
:param parameters: parameters dictionary.
:param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
with self.device:
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
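# --- Standalone sketch of what the executor wraps: a headless keras application
# with a global pooling strategy, so each image maps to a single feature vector.
# weights=None avoids downloading ImageNet weights here; the executor above uses
# weights='imagenet'.
def _keras_application_demo():
    model = tf.keras.applications.MobileNetV2(
        input_shape=(336, 336, 3),
        include_top=False,
        pooling='max',      # (batch, channels) output instead of a 4D feature map
        weights=None,
    )
    batch = np.random.rand(2, 336, 336, 3).astype('float32')
    return model(batch).shape   # (2, 1280) for MobileNetV2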
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, List, Union
import numpy as np
import tensorflow as tf
from jina import DocumentArray, Executor, requests
from jina.logging.logger import JinaLogger
from jina_commons.batching import get_docs_batch_generator
class ImageTFEncoder(Executor):
"""
    :class:`ImageTFEncoder` encodes ``Document`` content from an ndarray,
    potentially B x (Height x Width x Channel), into an ndarray of `B x D`,
    where `B` is the batch size and `D` is the output dimension.
    The :class:`ImageTFEncoder` wraps the models from
    `tensorflow.keras.applications
    <https://www.tensorflow.org/api_docs/python/tf/keras/applications>`_.
"""
def __init__(
self,
model_name: str = 'MobileNetV2',
img_shape: int = 336,
pool_strategy: str = 'max',
default_batch_size: int = 32,
default_traversal_paths: Union[List[str], str] = None,
device: str = '/CPU:0',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``DenseNet121``, ``DenseNet169``, ``DenseNet201``,
``InceptionResNetV2``, ``InceptionV3``, ``MobileNet``,
``MobileNetV2``, ``NASNetLarge``, ``NASNetMobile``,
``ResNet101``, ``ResNet152``, ``ResNet50``, ``ResNet101V2``,
``ResNet152V2``, ``ResNet50V2``, ``VGG16``, ``VGG19``,
            ``Xception``, etc. A full list can be found at
            `tensorflow.keras.applications
            <https://www.tensorflow.org/api_docs/python/tf/keras/applications#functions>`_.
:param img_shape: The shape of the image to be encoded.
:param pool_strategy: the pooling strategy. Options are:
- `None`: Means that the output of the model will be the 4D tensor
output of the last convolutional block.
            - `avg`: Means that global average pooling will be applied to the
output of the last convolutional block, and thus the output of
the model will be a 2D tensor.
- `max`: Means that global max pooling will be applied.
:param default_batch_size: size of each batch
:param default_traversal_paths: traversal path of the Documents, (e.g. 'r', 'c')
:param device: Device ('/CPU:0', '/GPU:0', '/GPU:X')
"""
super().__init__(*args, **kwargs)
if default_traversal_paths is None:
default_traversal_paths = ['r']
self.model_name = model_name
self.pool_strategy = pool_strategy
self.img_shape = img_shape
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.logger = JinaLogger(self.__class__.__name__)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if 'GPU' in device:
gpu_index = 0 if 'GPU:' not in device else int(device.split(':')[-1])
if len(gpus) < gpu_index + 1:
raise RuntimeError(f'Device {device} not found on your system!')
self.device = tf.device(device)
with self.device:
model = getattr(tf.keras.applications, self.model_name)(
input_shape=(self.img_shape, self.img_shape, 3),
include_top=False,
pooling=self.pool_strategy,
weights='imagenet',
)
model.trainable = False
self.model = model
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""
        Encode document content into an ndarray of `B x D`, where `B` is the
        batch size and `D` is the output dimension.
:param docs: DocumentArray containing blob as image data.
:param parameters: parameters dictionary.
:param kwargs: additional keyword arguments.
:return: Encoded result as a `BatchSize x D` numpy ``ndarray``,
`D` is the output dimension
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='blob',
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
for document_batch in document_batches_generator:
blob_batch = np.stack([d.blob for d in document_batch])
with self.device:
embedding_batch = self.model(blob_batch)
for document, embedding in zip(document_batch, embedding_batch):
document.embedding = np.array(embedding)
|
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor as AudioTFTensor,
)
AudioTensor = AudioNdArray
if tf_available and torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor, AudioTFTensor] # type: ignore
elif tf_available:
AudioTensor = Union[AudioNdArray, AudioTFTensor] # type: ignore
elif torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor] # type: ignore
|
from typing import Union
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import (
AudioTensorFlowTensor as AudioTFTensor,
)
AudioTensor = AudioNdArray
if tf_available and torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor, AudioTFTensor] # type: ignore
elif tf_available:
AudioTensor = Union[AudioNdArray, AudioTFTensor] # type: ignore
elif torch_available:
AudioTensor = Union[AudioNdArray, AudioTorchTensor] # type: ignore
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path to the pre-trained AudioCLIP model.
:param traversal_paths: default traversal path (used if not specified in
request's parameters)
:param batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
super().__init__(*args, **kwargs)
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_full.sh'], cwd=root_path)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.traversal_paths = traversal_paths
self.batch_size = batch_size
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> None:
"""
Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
if not docs:
return
batch_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
filter_fn=lambda doc: len(doc.text)>0
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
with torch.inference_mode():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
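# --- Usage sketch (not part of the original executor; assumptions noted) ---
# Assuming the AudioCLIP checkpoint already exists at the default ``model_path``
# and Jina is installed, the executor can be instantiated directly and fed a
# DocumentArray of text Documents; the sample texts are purely illustrative.
#
#     from jina import Document, DocumentArray
#     encoder = AudioCLIPTextEncoder(download_model=False)
#     docs = DocumentArray([Document(text='a dog barking'), Document(text='rain on a window')])
#     encoder.encode(docs=docs, parameters={'batch_size': 2})
#     print(docs[0].embedding.shape)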
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
tokenizer_path: str = '.cache/bpe_simple_vocab_16e6.txt.gz',
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
download_model: bool = True,
*args,
**kwargs
):
"""
:param model_path: path to the pre-trained AudioCLIP model.
:param traversal_paths: default traversal path (used if not specified in
request's parameters)
:param batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
super().__init__(*args, **kwargs)
if download_model:
import os
import subprocess
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
subprocess.call(['sh', 'scripts/download_full.sh'], cwd=root_path)
self.model = (
AudioCLIP(
pretrained=model_path,
bpe_path=tokenizer_path,
)
.to(device)
.eval()
)
self.traversal_paths = traversal_paths
self.batch_size = batch_size
@requests
def encode(
self,
docs: Optional[DocumentArray] = None,
parameters: dict = {},
*args,
**kwargs
) -> None:
"""
Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
if not docs:
return
batch_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
with torch.inference_mode():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
|
"""Module for argparse for Client"""
def mixin_comm_protocol_parser(parser):
"""Add the arguments for the protocol to the parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
        help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and custom span creation. '
        'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
        help='If metrics is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
        help='If metrics is enabled, this port will be used to configure the metrics exporter agent.',
)
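# --- Usage sketch (not part of the original module; a minimal, assumed example) ---
if __name__ == '__main__':
    import argparse

    _parser = argparse.ArgumentParser(description='Jina client options (illustration only)')
    mixin_comm_protocol_parser(_parser)
    mixin_client_features_parser(_parser)
    # Parse a sample command line; the communication protocol keeps its gRPC default.
    _args = _parser.parse_args(['--asyncio', '--metrics'])
    print(_args.protocol, _args.asyncio, _args.metrics)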
|
"""Module for argparse for Client"""
def mixin_comm_protocol_parser(parser):
"""Add the arguments for the protocol to the parser
    :param parser: the parser to configure
"""
from jina.enums import GatewayProtocolType
parser.add_argument(
'--protocol',
type=GatewayProtocolType.from_string,
choices=list(GatewayProtocolType),
default=GatewayProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import importlib as _importlib
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.6.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performance since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions, as the
# latter is linked to the OpenMP runtime in order to introspect it, and
# importing it first would fail if the OpenMP DLL cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
_submodules = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
]
__all__ = _submodules + [
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
def __dir__():
return __all__
def __getattr__(name):
if name in _submodules:
return _importlib.import_module(f"sklearn.{name}")
else:
try:
return globals()[name]
except KeyError:
raise AttributeError(f"Module 'sklearn' has no attribute '{name}'")
_BUILT_WITH_MESON = False
try:
import sklearn._built_with_meson # noqa: F401
_BUILT_WITH_MESON = True
except ModuleNotFoundError:
pass
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
    # Check if a random seed exists in the environment; if not, create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
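# --- Illustration (not part of the original file) ---
# The ``__getattr__`` hook above makes submodule imports lazy: the first
# attribute access triggers ``importlib.import_module``.
#
#     import sklearn
#     sklearn.cluster        # 'sklearn.cluster' is imported here, on first access
#     sklearn.get_config()   # non-module attributes resolve from globals()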
|
"""Configure global settings and get information about the working environment."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Machine learning module for Python
# ==================================
#
# sklearn is a Python module integrating classical machine
# learning algorithms in the tightly-knit world of scientific Python
# packages (numpy, scipy, matplotlib).
#
# It aims to provide simple and efficient solutions to learning problems
# that are accessible to everybody and reusable in various contexts:
# machine-learning as a versatile tool for science and engineering.
#
# See https://scikit-learn.org for complete documentation.
import logging
import os
import random
from ._config import config_context, get_config, set_config
logger = logging.getLogger(__name__)
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y.0 # For first release after an increment in Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.Y.ZaN # Alpha release
# X.Y.ZbN # Beta release
# X.Y.ZrcN # Release Candidate
# X.Y.Z # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = "1.6.dev0"
# On OSX, we can get a runtime error due to multiple OpenMP libraries loaded
# simultaneously. This can happen for instance when calling BLAS inside a
# prange. Setting the following environment variable allows multiple OpenMP
# libraries to be loaded. It should not degrade performance since we manually
# take care of potential over-subscription performance issues, in sections of
# the code where nested OpenMP loops can happen, by dynamically reconfiguring
# the inner OpenMP runtime to temporarily disable it while under the scope of
# the outer OpenMP parallel section.
os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True")
# Workaround issue discovered in intel-openmp 2019.5:
# https://github.com/ContinuumIO/anaconda-issues/issues/11294
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
# `_distributor_init` allows distributors to run custom init code.
# For instance, for the Windows wheel, this is used to pre-load the
# vcomp shared library runtime for OpenMP embedded in the sklearn/.libs
# sub-folder.
# It is necessary to do this prior to importing show_versions, as the
# latter is linked to the OpenMP runtime in order to introspect it, and
# importing it first would fail if the OpenMP DLL cannot be found.
from . import ( # noqa: F401 E402
__check_build,
_distributor_init,
)
from .base import clone # noqa: E402
from .utils._show_versions import show_versions # noqa: E402
__all__ = [
"calibration",
"cluster",
"covariance",
"cross_decomposition",
"datasets",
"decomposition",
"dummy",
"ensemble",
"exceptions",
"experimental",
"externals",
"feature_extraction",
"feature_selection",
"gaussian_process",
"inspection",
"isotonic",
"kernel_approximation",
"kernel_ridge",
"linear_model",
"manifold",
"metrics",
"mixture",
"model_selection",
"multiclass",
"multioutput",
"naive_bayes",
"neighbors",
"neural_network",
"pipeline",
"preprocessing",
"random_projection",
"semi_supervised",
"svm",
"tree",
"discriminant_analysis",
"impute",
"compose",
# Non-modules:
"clone",
"get_config",
"set_config",
"config_context",
"show_versions",
]
_BUILT_WITH_MESON = False
try:
import sklearn._built_with_meson # noqa: F401
_BUILT_WITH_MESON = True
except ModuleNotFoundError:
pass
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import numpy as np
    # Check if a random seed exists in the environment; if not, create one.
_random_seed = os.environ.get("SKLEARN_SEED", None)
if _random_seed is None:
_random_seed = np.random.uniform() * np.iinfo(np.int32).max
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python matryoshka_sts.py
OR
python matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.MatryoshkaLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training; the dev-set evaluator runs every 1000 steps
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-matryoshka')`."
)
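# --- Follow-up sketch (not part of the original script; assumptions noted) ---
# A Matryoshka-trained model keeps most of its quality when its embeddings are
# truncated. ``truncate_dim`` is assumed to be supported by the installed
# sentence-transformers version; otherwise the embedding matrix can simply be
# sliced to its first 128 columns.
#
#     small_model = SentenceTransformer(model_save_path, truncate_dim=128)
#     emb = small_model.encode(["A man is playing guitar."])
#     print(emb.shape)  # expected: (1, 128)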
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python matryoshka_sts.py
OR
python matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.MatryoshkaLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training; the dev-set evaluator runs every 1000 steps
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.save_to_hub(f"{model_name}-sts-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.save_to_hub('{model_name}-sts-matryoshka')`."
)
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
written = self._write(**self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
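# --- Usage sketch (not part of the original module; names and paths are hypothetical) ---
# Writing a small in-memory dataset to a SQLite table with the writer above:
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#     written = SqlDatasetWriter(ds, name="demo_table", con="sqlite:///demo.db", batch_size=2).write()
#     print(written)  # number of rows written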
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
written = self._write(**self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
r"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
router_mapping (`Optional[Dict[str, str]]`, *optional*):
A mapping of dataset names to Router routes, like "query" or "document". This is used to specify which
Router module to use for each dataset. Two formats are accepted:
1. `Dict[str, str]`: A mapping of dataset names to routes for single-dataset training/evaluation.
2. `Dict[str, Dict[str, str]]`: A mapping of dataset names to a mapping of column names to routes for
multi-dataset training/evaluation.
learning_rate_mapping (`Optional[Dict[str, float]]`, *optional*):
A mapping of parameter names to learning rates. This allows you to set different learning rates for
different parts of the model, e.g., `{'IDF\.*': 1e-3}` for the IDF module. This is useful when you want to
fine-tune specific parts of the model with different learning rates.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
    SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extends
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
# dataset settings
dataset_type = 'RefCocoDataset'
data_root = 'data/coco/'
backend_args = None
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='LoadAnnotations',
with_mask=True,
with_bbox=False,
with_seg=False,
with_label=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'gt_masks', 'text'))
]
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='val',
text_mode='select_first',
pipeline=test_pipeline))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img_path='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='testA', # or 'testB'
text_mode='select_first',
pipeline=test_pipeline))
val_evaluator = dict(type='RefSegMetric', metric=['cIoU', 'mIoU'])
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'RefCOCODataset'
data_root = 'data/refcoco/'
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'image_id'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='train',
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='val',
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_prefix=dict(img='train2014/'),
ann_file='refcoco/instances.json',
split_file='refcoco/refs(unc).p',
split='testA', # or 'testB'
pipeline=test_pipeline,
backend_args=backend_args))
# TODO: set the metrics
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.7", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("3.0.1", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
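# --- Example invocation (illustration only; the script filename is assumed) ---
#     $ python _min_dependencies.py numpy
#     1.22.0
# Likewise, tag_to_packages["build"] collects the pinned build requirements:
#     ["numpy>=1.22.0", "scipy>=1.8.0", "cython>=3.0.10", "meson-python>=0.16.0"]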
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.7", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from ...simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(
documents_chunk, documents_chunk_chunk, default_traversal_paths, ranking
):
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
default_traversal_paths = ['r']
ranker = SimpleRanker(
metric='cosine',
ranking=ranking,
default_traversal_paths=default_traversal_paths,
)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
mean_scores.append(sum(scores) / 10)
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from ...simpleranker import SimpleRanker
@pytest.mark.parametrize('default_traversal_paths', [['r'], ['c']])
@pytest.mark.parametrize('ranking', ['min', 'max'])
def test_ranking(documents_chunk, documents_chunk_chunk, default_traversal_paths, ranking):
ranker = SimpleRanker(metric='cosine', ranking=ranking, default_traversal_paths=default_traversal_paths)
if default_traversal_paths == ['r']:
ranking_docs = documents_chunk
else:
ranking_docs = documents_chunk_chunk
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
if ranking == 'min':
assert (
match.scores['cosine'].value
<= doc.matches[i + 1].scores['cosine'].value
)
else:
assert (
match.scores['cosine'].value
>= doc.matches[i + 1].scores['cosine'].value
)
@pytest.mark.parametrize('ranking', ['mean_min', 'mean_max'])
def test_mean_ranking(documents_chunk, ranking):
default_traversal_paths = ['r']
ranker = SimpleRanker(metric='cosine', ranking=ranking,
default_traversal_paths=default_traversal_paths)
ranking_docs = documents_chunk
mean_scores = []
for doc in ranking_docs[0].chunks:
scores = []
for match in doc.matches:
scores.append(match.scores['cosine'].value)
mean_scores.append(sum(scores)/10)
mean_scores.sort(reverse=ranking == 'mean_max')
ranker.rank(ranking_docs, parameters={})
assert ranking_docs
for doc in ranking_docs.traverse_flat(default_traversal_paths):
assert doc.matches
for i in range(len(doc.matches) - 1):
match = doc.matches[i]
assert match.tags
assert match.scores['cosine'].value == pytest.approx(mean_scores[i], 1e-5)
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: root directory of the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import (
get_whitenoise,
normalize_wav,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: root directory of the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.planner import (
RequestsDeleteToolWithParsing,
RequestsGetToolWithParsing,
RequestsPatchToolWithParsing,
RequestsPostToolWithParsing,
RequestsPutToolWithParsing,
create_openapi_agent,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RequestsGetToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPostToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPatchToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPutToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsDeleteToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.planner",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RequestsDeleteToolWithParsing",
"RequestsGetToolWithParsing",
"RequestsPatchToolWithParsing",
"RequestsPostToolWithParsing",
"RequestsPutToolWithParsing",
"create_openapi_agent",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.planner import (
RequestsDeleteToolWithParsing,
RequestsGetToolWithParsing,
RequestsPatchToolWithParsing,
RequestsPostToolWithParsing,
RequestsPutToolWithParsing,
create_openapi_agent,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RequestsGetToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPostToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPatchToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsPutToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"RequestsDeleteToolWithParsing": (
"langchain_community.agent_toolkits.openapi.planner"
),
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.planner",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RequestsGetToolWithParsing",
"RequestsPostToolWithParsing",
"RequestsPatchToolWithParsing",
"RequestsPutToolWithParsing",
"RequestsDeleteToolWithParsing",
"create_openapi_agent",
]
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
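# Training sketch: with a standard MMDetection checkout, configs like this one are
# consumed by the stock training entry point. The config filename below is
# illustrative (an assumption), not necessarily the repository's actual path:
#
#   python tools/train.py configs/resnest/cascade-mask-rcnn_s50_fpn_syncbn-backbone_1x_coco.py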
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import backend_utils
class BackendUtilsTest(testing.TestCase):
@parameterized.named_parameters(
("numpy", "numpy"),
("jax", "jax"),
("tensorflow", "tensorflow"),
("torch", "torch"),
)
def test_dynamic_backend(self, name):
dynamic_backend = backend_utils.DynamicBackend()
x = np.random.uniform(size=[1, 2, 3])
if name == "numpy":
dynamic_backend.set_backend(name)
if backend.backend() != "numpy":
with self.assertRaisesRegex(
NotImplementedError,
"Currently, we cannot dynamically import the numpy backend",
):
y = dynamic_backend.numpy.log10(x)
else:
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, np.ndarray)
elif name == "jax":
import jax
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, jax.Array)
elif name == "tensorflow":
import tensorflow as tf
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, tf.Tensor)
elif name == "torch":
import torch
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, torch.Tensor)
def test_dynamic_backend_invalid_name(self):
dynamic_backend = backend_utils.DynamicBackend()
with self.assertRaisesRegex(ValueError, "Available backends are"):
dynamic_backend.set_backend("abc")
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import backend_utils
class BackendUtilsTest(testing.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("numpy", "numpy"),
("jax", "jax"),
("tensorflow", "tensorflow"),
("torch", "torch"),
)
def test_dynamic_backend(self, name):
dynamic_backend = backend_utils.DynamicBackend()
x = np.random.uniform(size=[1, 2, 3])
if name == "numpy":
dynamic_backend.set_backend(name)
if backend.backend() != "numpy":
with self.assertRaisesRegex(
NotImplementedError,
"Currently, we cannot dynamically import the numpy backend",
):
y = dynamic_backend.numpy.log10(x)
else:
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, np.ndarray)
elif name == "jax":
import jax
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, jax.Array)
elif name == "tensorflow":
import tensorflow as tf
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, tf.Tensor)
elif name == "torch":
import torch
dynamic_backend.set_backend(name)
y = dynamic_backend.numpy.log10(x)
self.assertIsInstance(y, torch.Tensor)
def test_dynamic_backend_invalid_name(self):
dynamic_backend = backend_utils.DynamicBackend()
with self.assertRaisesRegex(ValueError, "Available backends are"):
dynamic_backend.set_backend("abc")
|
"""Test HuggingFace API wrapper."""
from pathlib import Path
import pytest
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceHub(repo_id="google/flan-t5-xl")
output = llm.invoke("The capital of New York is")
assert output == "Albany"
def test_huggingface_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn")
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1})
with pytest.raises(ValueError):
llm.invoke("Say foo:")
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
|
"""Test HuggingFace API wrapper."""
from pathlib import Path
import pytest
from langchain_community.llms.huggingface_hub import HuggingFaceHub
from langchain_community.llms.loading import load_llm
from tests.integration_tests.llms.utils import assert_llm_equality
def test_huggingface_text_generation() -> None:
"""Test valid call to HuggingFace text generation model."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_text2text_generation() -> None:
"""Test valid call to HuggingFace text2text model."""
llm = HuggingFaceHub(repo_id="google/flan-t5-xl") # type: ignore[call-arg]
output = llm.invoke("The capital of New York is")
assert output == "Albany"
def test_huggingface_summarization() -> None:
"""Test valid call to HuggingFace summarization model."""
llm = HuggingFaceHub(repo_id="facebook/bart-large-cnn") # type: ignore[call-arg]
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_huggingface_call_error() -> None:
"""Test valid call to HuggingFace that errors."""
llm = HuggingFaceHub(model_kwargs={"max_new_tokens": -1}) # type: ignore[call-arg]
with pytest.raises(ValueError):
llm.invoke("Say foo:")
def test_saving_loading_llm(tmp_path: Path) -> None:
"""Test saving/loading an HuggingFaceHub LLM."""
llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10}) # type: ignore[call-arg]
llm.save(file_path=tmp_path / "hf.yaml")
loaded_llm = load_llm(tmp_path / "hf.yaml")
assert_llm_equality(llm, loaded_llm)
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
# TODO : Watch the similarity default value: it is currently cosine, but dot product might be better for sparse
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined in each test that uses that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import NdArray, TorchTensor
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_find_torch(weaviate_client):
class TorchDoc(BaseDoc):
tens: TorchTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TorchDoc]()
index_docs = [
TorchDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TorchTensor)
assert docs[0].id == index_docs[-1].id
assert torch.allclose(docs[0].tens, index_docs[-1].tens)
@pytest.mark.tensorflow
def test_find_tensorflow():
from docarray.typing import TensorFlowTensor
class TfDoc(BaseDoc):
tens: TensorFlowTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TfDoc]()
index_docs = [
TfDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TensorFlowTensor)
assert docs[0].id == index_docs[-1].id
assert np.allclose(
docs[0].tens.unwrap().numpy(), index_docs[-1].tens.unwrap().numpy()
)
def test_contain():
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class SimpleSchema(BaseDoc):
tens: NdArray[10]
index = WeaviateDocumentIndex[SimpleSchema]()
index_docs = [SimpleDoc(tens=np.zeros(10)) for _ in range(10)]
assert (index_docs[0] in index) is False
index.index(index_docs)
for doc in index_docs:
assert (doc in index) is True
index_docs_new = [SimpleDoc(tens=np.zeros(10)) for _ in range(10)]
for doc in index_docs_new:
assert (doc in index) is False
|
# TODO: enable ruff qa on this file when we figure out why it thinks weaviate_client is
# redefined in each test that uses that fixture
# ruff: noqa
import numpy as np
import pytest
import torch
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import TorchTensor
from tests.index.weaviate.fixture_weaviate import ( # noqa: F401
start_storage,
weaviate_client,
)
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_find_torch(weaviate_client):
class TorchDoc(BaseDoc):
tens: TorchTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TorchDoc]()
index_docs = [
TorchDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TorchTensor)
assert docs[0].id == index_docs[-1].id
assert torch.allclose(docs[0].tens, index_docs[-1].tens)
@pytest.mark.tensorflow
def test_find_tensorflow():
from docarray.typing import TensorFlowTensor
class TfDoc(BaseDoc):
tens: TensorFlowTensor[10] = Field(dims=10, is_embedding=True)
index = WeaviateDocumentIndex[TfDoc]()
index_docs = [
TfDoc(tens=np.random.rand(10).astype(dtype=np.float32)) for _ in range(10)
]
index.index(index_docs)
query = index_docs[-1]
docs, scores = index.find(query, limit=5)
assert len(docs) == 5
assert len(scores) == 5
for doc in docs:
assert isinstance(doc.tens, TensorFlowTensor)
assert docs[0].id == index_docs[-1].id
assert np.allclose(
docs[0].tens.unwrap().numpy(), index_docs[-1].tens.unwrap().numpy()
)
def test_comprehensive():
import numpy as np
from pydantic import Field
from docarray import BaseDoc
from docarray.index.backends.weaviate import WeaviateDocumentIndex
from docarray.typing import NdArray
class Document(BaseDoc):
text: str
embedding: NdArray[2] = Field(
dims=2, is_embedding=True
) # Embedding column -> vector representation of the document
file: NdArray[100] = Field(dims=100)
docs = [
Document(
text="Hello world",
embedding=np.array([1, 2]),
file=np.random.rand(100),
id="1",
),
Document(
text="Hello world, how are you?",
embedding=np.array([3, 4]),
file=np.random.rand(100),
id="2",
),
Document(
text="Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut",
embedding=np.array([5, 6]),
file=np.random.rand(100),
id="3",
),
]
batch_config = {
"batch_size": 20,
"dynamic": False,
"timeout_retries": 3,
"num_workers": 1,
}
dbconfig = WeaviateDocumentIndex.DBConfig(
host="https://docarray-test-4mfexsso.weaviate.network", # Replace with your endpoint
auth_api_key="JPsfPHB3OLHrgnN80JAa7bmPApOxOfaHy0SO",
)
runtimeconfig = WeaviateDocumentIndex.RuntimeConfig(batch_config=batch_config)
store = WeaviateDocumentIndex[Document](db_config=dbconfig)
store.configure(runtimeconfig) # Batch settings being passed on
store.index(docs)
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://api.stepfun.com/v1"
DEFAULT_MODEL = "step-1v-8k"
class StepFun(OpenAILike):
"""
The StepFun class is a subclass of OpenAILike and is used to interact with the StepFun model.
Parameters
----------
model (str): The name of the Stepfun model to use. See https://platform.stepfun.com/docs/llm/modeloverview for options.
context_window (int): The maximum size of the context window for the model. See https://platform.stepfun.com/docs/llm/modeloverview for options.
is_chat_model (bool): Indicates whether the model is a chat model.
Attributes
----------
model (str): The name of the Stepfun model to use.
context_window (int): The maximum size of the context window for the model.
is_chat_model (bool): Indicates whether the model is a chat model.
"""
model: str = Field(
description="The Stepfun model to use. See https://platform.stepfun.com/docs/llm/modeloverview for options."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. See https://platform.stepfun.com/docs/llm/modeloverview for options.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
        Initialize the StepFun API client.
Args:
model (str): The name of the model to use. Defaults to DEFAULT_MODEL.
temperature (float): The temperature to use for the model. Defaults to DEFAULT_TEMPERATURE.
max_tokens (int): The maximum number of tokens to generate. Defaults to DEFAULT_NUM_OUTPUTS.
additional_kwargs (Optional[Dict[str, Any]]): Additional keyword arguments to pass to the model. Defaults to None.
max_retries (int): The maximum number of retries to make when calling the API. Defaults to 5.
api_base (Optional[str]): The base URL for the API. Defaults to DEFAULT_API_BASE.
api_key (Optional[str]): The API key to use. Defaults to None.
**kwargs (Any): Additional keyword arguments to pass to the model.
Returns:
None
"""
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "STEPFUN_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "STEPFUN_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Stpefun_LLM"
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_CONTEXT_WINDOW,
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://api.stepfun.com/v1"
DEFAULT_MODEL = "step-1v-8k"
class StepFun(OpenAILike):
"""
The StepFun class is a subclass of OpenAILike and is used to interact with the StepFun model.
Parameters:
model (str): The name of the Stepfun model to use. See https://platform.stepfun.com/docs/llm/modeloverview for options.
context_window (int): The maximum size of the context window for the model. See https://platform.stepfun.com/docs/llm/modeloverview for options.
is_chat_model (bool): Indicates whether the model is a chat model.
Attributes:
model (str): The name of the Stepfun model to use.
context_window (int): The maximum size of the context window for the model.
is_chat_model (bool): Indicates whether the model is a chat model.
"""
model: str = Field(
description="The Stepfun model to use. See https://platform.stepfun.com/docs/llm/modeloverview for options."
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. See https://platform.stepfun.com/docs/llm/modeloverview for options.",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
"""
        Initialize the StepFun API client.
Args:
model (str): The name of the model to use. Defaults to DEFAULT_MODEL.
temperature (float): The temperature to use for the model. Defaults to DEFAULT_TEMPERATURE.
max_tokens (int): The maximum number of tokens to generate. Defaults to DEFAULT_NUM_OUTPUTS.
additional_kwargs (Optional[Dict[str, Any]]): Additional keyword arguments to pass to the model. Defaults to None.
max_retries (int): The maximum number of retries to make when calling the API. Defaults to 5.
api_base (Optional[str]): The base URL for the API. Defaults to DEFAULT_API_BASE.
api_key (Optional[str]): The API key to use. Defaults to None.
**kwargs (Any): Additional keyword arguments to pass to the model.
Returns:
None
"""
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "STEPFUN_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "STEPFUN_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Stpefun_LLM"
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
elu,
selu,
softplus,
softsign,
silu,
gelu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
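# Usage sketch: get() resolves a name, config dict, or callable through
# ALL_OBJECTS_DICT; serialize() returns the plain name for these builtins, so a
# round trip through deserialize() recovers a callable activation.
if __name__ == "__main__":
    fn = get("relu")
    name = serialize(fn)  # expected to be the plain string "relu" for builtins
    assert callable(deserialize(name))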
|
from typing import Any, Dict, Optional
import httpx
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.fireworks.utils import (
resolve_fireworks_credentials,
)
from llama_index.embeddings.openai import OpenAIEmbedding
DEFAULT_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_MODEL = "nomic-ai/nomic-embed-text-v1.5"
class FireworksEmbedding(OpenAIEmbedding):
"""
Fireworks class for embeddings.
Args:
model (str): Model for embedding.
Defaults to "nomic-ai/nomic-embed-text-v1.5"
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Fireworks API key.")
api_base: str = Field(description="The base URL for Fireworks API.")
api_version: str = Field(description="The version for OpenAI API.")
def __init__(
self,
model_name: str = DEFAULT_MODEL,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_API_BASE,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
api_key, api_base, api_version = resolve_fireworks_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "FireworksEmbedding"
|
from typing import Any, Dict, Optional
import httpx
from llama_index.core.base.embeddings.base import (
DEFAULT_EMBED_BATCH_SIZE,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks import CallbackManager
from llama_index.embeddings.fireworks.utils import (
resolve_fireworks_credentials,
)
from llama_index.embeddings.openai import OpenAIEmbedding
DEFAULT_API_BASE = "https://api.fireworks.ai/inference/v1"
DEFAULT_MODEL = "nomic-ai/nomic-embed-text-v1.5"
class FireworksEmbedding(OpenAIEmbedding):
"""
Fireworks class for embeddings.
Args:
model (str): Model for embedding.
Defaults to "nomic-ai/nomic-embed-text-v1.5"
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the OpenAI API."
)
api_key: str = Field(description="The Fireworks API key.")
api_base: str = Field(description="The base URL for Fireworks API.")
api_version: str = Field(description="The version for OpenAI API.")
def __init__(
self,
model_name: str = DEFAULT_MODEL,
dimensions: Optional[int] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = DEFAULT_API_BASE,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
**kwargs: Any,
) -> None:
api_key, api_base, api_version = resolve_fireworks_credentials(
api_key=api_key,
api_base=api_base,
api_version=api_version,
)
super().__init__(
model_name=model_name,
dimensions=dimensions,
embed_batch_size=embed_batch_size,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
timeout=timeout,
reuse_client=reuse_client,
callback_manager=callback_manager,
default_headers=default_headers,
http_client=http_client,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "FireworksEmbedding"
|
from docarray import BaseDoc
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDoc):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
from docarray import BaseDocument
from docarray.typing import AnyUrl
def test_set_any_url():
class MyDocument(BaseDocument):
any_url: AnyUrl
d = MyDocument(any_url="https://jina.ai")
assert isinstance(d.any_url, AnyUrl)
assert d.any_url == "https://jina.ai"
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`):
            A list of language codes; each example maps these codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`, *optional*):
            The set of allowed language codes; each example maps codes to one or more string translations.
            The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`):
            A list of language codes; each example maps these codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
        languages (`list` of `str`, *optional*):
            The set of allowed language codes; each example maps codes to one or more string translations.
            The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
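# Usage sketch built only from the classes above: encode_example() flattens a
# per-example dict (possibly with several translations per language) into the
# parallel "language"/"translation" tuples described in the docstring.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
    encoded = feature.encode_example(
        {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
    )
    print(encoded["language"])     # ('de', 'en', 'fr', 'fr')
    print(encoded["translation"])  # ('die katze', 'the cat', 'la chatte', 'le chat')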
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import nightly, require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@nightly
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_flax(self):
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2",
variant="bf16",
dtype=jnp.bfloat16,
)
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@nightly
@require_flax
class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_dpm_flax(self):
model_id = "stabilityai/stable-diffusion-2"
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
model_id,
scheduler=scheduler,
variant="bf16",
dtype=jnp.bfloat16,
)
params["scheduler"] = scheduler_params
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import nightly, require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@nightly
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_flax(self):
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2",
variant="bf16",
dtype=jnp.bfloat16,
)
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
@nightly
@require_flax
class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_dpm_flax(self):
model_id = "stabilityai/stable-diffusion-2"
scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
model_id,
scheduler=scheduler,
variant="bf16",
dtype=jnp.bfloat16,
)
params["scheduler"] = scheduler_params
prompt = "A painting of a squirrel eating a burger"
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = sd_pipe.prepare_inputs(prompt)
params = replicate(params)
prompt_ids = shard(prompt_ids)
prng_seed = jax.random.PRNGKey(0)
prng_seed = jax.random.split(prng_seed, jax.device_count())
images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# docstyle-ignore
INSTALL_CONTENT = """
# Datasets installation
! pip install datasets transformers
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/datasets.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
default_branch_name = "main"
version_prefix = ""
|
default_branch_name = "main"
version_prefix = ""
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import disable_flash_attention
from keras.src.backend.config import enable_flash_attention
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import is_flash_attention_enabled
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.saving.serialization_lib import enable_unsafe_deserialization
from keras.src.utils.backend_utils import set_backend
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.traceback_utils import disable_traceback_filtering
from keras.src.utils.traceback_utils import enable_traceback_filtering
from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.layers.attention.attention import disable_flash_attention
from keras.src.layers.attention.attention import enable_flash_attention
from keras.src.layers.attention.attention import is_flash_attention_enabled
from keras.src.saving.serialization_lib import enable_unsafe_deserialization
from keras.src.utils.backend_utils import set_backend
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.traceback_utils import disable_traceback_filtering
from keras.src.utils.traceback_utils import enable_traceback_filtering
from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""Set the return type of torch operations on datapoints.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
Can be used as a global flag for the entire program:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("datapoints")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("datapoints"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "datapoint" or "tensor". Default is "tensor".
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "datapoint": True}[return_type.lower()]
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""Set the return type of torch operations on datapoints.
Can be used as a global flag for the entire program:
.. code:: python
set_return_type("datapoints")
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
with set_return_type("datapoints"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "datapoint" or "tensor". Default is "tensor".
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "datapoint": True}[return_type.lower()]
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
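# Minimal sketch of the context-manager semantics, using only names defined above:
# set_return_type() flips the module-level flag immediately and returns a
# _ReturnTypeCM whose __exit__ restores the previous value.
if __name__ == "__main__":
    assert _must_return_subclass() is False
    with set_return_type("datapoint"):
        assert _must_return_subclass() is True
    assert _must_return_subclass() is False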
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBox
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_box=BoundingBox(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.prototype.datapoints import BoundingBox, Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_box=BoundingBox(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* [:footcite:`Mir2015QUESST2014EQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
assert subset in ["docs", "dev", "eval"], "`subset` must be one of ['docs', 'dev', 'eval']"
assert language is None or language in _LANGUAGES, f"`language` must be None or one of {str(_LANGUAGES)}"
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
language: str,
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* [:footcite:`Mir2015QUESST2014EQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
assert subset in ["docs", "dev", "eval"], "`subset` must be one of ['docs', 'dev', 'eval']"
assert language is None or language in _LANGUAGES, f"`language` must be None or one of {str(_LANGUAGES)}"
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, str]:
audio_path = self.data[n]
wav, _ = torchaudio.load(audio_path)
return wav, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, str]:
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
language: str,
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
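A minimal usage sketch, assuming a hypothetical local root; with download=True the archive is fetched and extracted on first use:
dataset = QUESST14("path/to/data", subset="docs", language="nnenglish", download=True)  # hypothetical root
waveform, file_name = dataset[0]
print(len(dataset), waveform.shape, file_name)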
|
from typing import Any
from llama_index.core.bridge.pydantic import Field, model_serializer
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Any = Field(exclude=True)
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Any
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
}
|
from typing import Any
from llama_index.core.bridge.pydantic import model_serializer
from llama_index.core.tools import ToolSelection, ToolOutput
from llama_index.core.llms import ChatMessage
from llama_index.core.workflow import Event, StartEvent
class AgentInput(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
class AgentSetup(Event):
"""Agent setup."""
input: list[ChatMessage]
current_agent_name: str
class AgentStream(Event):
"""Agent stream."""
delta: str
response: str
current_agent_name: str
tool_calls: list[ToolSelection]
raw: Any
class AgentOutput(Event):
"""LLM output."""
response: ChatMessage
tool_calls: list[ToolSelection]
raw: Any
current_agent_name: str
def __str__(self) -> str:
return self.response.content or ""
class ToolCall(Event):
"""All tool calls are surfaced."""
tool_name: str
tool_kwargs: dict
tool_id: str
class ToolCallResult(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
class AgentWorkflowStartEvent(StartEvent):
@model_serializer()
def serialize_start_event(self) -> dict:
"""Serialize the start event and exclude the memory."""
return {
"user_msg": self.user_msg,
"chat_history": self.chat_history,
}
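These events are plain pydantic models, so they can be constructed and serialized directly. A small sketch of the start event's custom serializer, assuming extra keyword fields (such as a memory object) are accepted the way other workflow events accept them:
from llama_index.core.llms import ChatMessage

start = AgentWorkflowStartEvent(
    user_msg="What is the weather today?",
    chat_history=[ChatMessage(role="user", content="hello")],
    memory={"scratch": "large state that should not be persisted"},  # hypothetical extra field
)
print(start.model_dump())  # only user_msg and chat_history survive serialization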
|
import json
import os
import pytest
from hubble.executor import HubExecutor
from hubble.executor.hubio import HubIO
from jina import __version__
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
def test_get_image_name(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
uses = 'jinahub://DummyExecutor'
image_name = get_image_name(uses)
assert image_name == 'jinahub/DummyExecutor'
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
import json
import os
import pytest
from jina import __version__
from jina.hubble import HubExecutor
from jina.hubble.hubio import HubIO
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
def test_get_image_name(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
uses = 'jinahub://DummyExecutor'
image_name = get_image_name(uses)
assert image_name == 'jinahub/DummyExecutor'
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
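The test pins down the contract of the JINA_HUB_NO_IMAGE_REBUILD switch rather than its implementation. A generic, illustrative helper (not Jina's actual code) that would satisfy the same contract:
import os

def _rebuild_image_default() -> bool:
    # rebuild by default; any value of JINA_HUB_NO_IMAGE_REBUILD disables it
    return 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ

assert _rebuild_image_default() is True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
assert _rebuild_image_default() is False
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']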
|
"""Test chat model integration using standard integration tests."""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def has_tool_choice(self) -> bool:
return False
|
"""Test chat model integration using standard integration tests."""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from huggingface_hub.hf_api import RepoFile
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or (err.response and err.response.status_code not in [500, 503]):
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry)  # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry}/{max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# `list_files_info` is deprecated in favor of `list_repo_tree` in `huggingface_hub>=0.20.0`
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def list_files_info(hf_api: HfApi, **kwargs):
yield from hf_api.list_files_info(**kwargs)
else:
def list_files_info(hf_api: HfApi, **kwargs):
kwargs = {**kwargs, "recursive": True}
for repo_path in hf_api.list_repo_tree(**kwargs):
if isinstance(repo_path, RepoFile):
yield repo_path
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
|
import time
from functools import partial
from huggingface_hub import HfApi, hf_hub_url
from packaging import version
from requests import ConnectionError, HTTPError
from .. import config
from . import logging
logger = logging.get_logger(__name__)
# Retry `preupload_lfs_files` in `huggingface_hub<0.20.0` on the "500 (Internal Server Error)" and "503 (Service Unavailable)" HTTP errors
if config.HF_HUB_VERSION < version.parse("0.20.0"):
def preupload_lfs_files(hf_api: HfApi, **kwargs):
max_retries = 5
base_wait_time = 1
max_wait_time = 8
retry = 0
while True:
try:
hf_api.preupload_lfs_files(**kwargs)
except (RuntimeError, HTTPError, ConnectionError) as err:
if isinstance(err, RuntimeError):
if isinstance(err.__cause__, (HTTPError, ConnectionError)):
err = err.__cause__
else:
raise err
if retry >= max_retries or (err.response and err.response.status_code not in [500, 503]):
raise err
else:
sleep_time = min(max_wait_time, base_wait_time * 2**retry)  # Exponential backoff
logger.info(
f"{hf_api.preupload_lfs_files} timed out, retrying in {sleep_time}s... [{retry}/{max_retries}]"
)
time.sleep(sleep_time)
retry += 1
else:
break
else:
def preupload_lfs_files(hf_api: HfApi, **kwargs):
hf_api.preupload_lfs_files(**kwargs)
# backward compatibility
hf_hub_url = partial(hf_hub_url, repo_type="dataset")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseMOTModel
from .bytetrack import ByteTrack
from .deep_sort import DeepSORT
from .qdtrack import QDTrack
__all__ = ['BaseMOTModel', 'ByteTrack', 'QDTrack', 'DeepSORT']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseMOTModel
from .bytetrack import ByteTrack
from .qdtrack import QDTrack
__all__ = ['BaseMOTModel', 'ByteTrack', 'QDTrack']
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
if backend not in ("tensorflow", "jax", "torch", "numpy"):
raise ValueError(
"Available backends are ('tensorflow', 'jax', 'torch' and "
f"'numpy'). Received: backend={backend}"
)
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
module = importlib.import_module("keras.src.backend.tensorflow")
return getattr(module, name)
if self._backend == "jax":
module = importlib.import_module("keras.src.backend.jax")
return getattr(module, name)
if self._backend == "torch":
module = importlib.import_module("keras.src.backend.torch")
return getattr(module, name)
if self._backend == "numpy":
if backend_module.backend() == "numpy":
return getattr(backend_module, name)
else:
raise NotImplementedError(
"Currently, we cannot dynamically import the numpy backend "
"because it would disrupt the namespace of the import."
)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated object instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
from keras.src.backend import tensorflow as tf_backend
return getattr(tf_backend, name)
if self._backend == "jax":
from keras.src.backend import jax as jax_backend
return getattr(jax_backend, name)
if self._backend == "torch":
from keras.src.backend import torch as torch_backend
return getattr(torch_backend, name)
if self._backend == "numpy":
# TODO (ariG23498):
# The import `from keras.src.backend import numpy as numpy_backend`
# is not working. This is a temporary fix.
# The import is redirected to `keras.backend.numpy.numpy.py`
from keras.src import backend as numpy_backend
return getattr(numpy_backend, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated object instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
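A short usage sketch for DynamicBackend, assuming both TensorFlow and JAX are installed so that their keras.src.backend submodules can be imported:
import numpy as np

dyn = DynamicBackend("tensorflow")
x = dyn.convert_to_tensor(np.ones((2, 2)))   # a tf.Tensor
dyn.set_backend("jax")
y = dyn.convert_to_tensor(np.ones((2, 2)))   # a jax.Array
dyn.reset()                                  # back to the globally configured backend
print(dyn.name)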
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage inferencer
INFERENCERS = Registry('inferencer')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 20 root registries to support using modules across
projects.
More details can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .build_functions import (build_model_from_cfg, build_runner_from_cfg,
build_scheduler_from_cfg)
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner', build_func=build_runner_from_cfg)
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# manage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model', build_model_from_cfg)
# manage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# manage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# manage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# manage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry(
'parameter scheduler', build_func=build_scheduler_from_cfg)
# manage all kinds of metrics
METRICS = Registry('metric')
# manage evaluator
EVALUATOR = Registry('evaluator')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
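Downstream projects consume these root registries through the usual register/build pattern. A minimal sketch with a made-up module (TinyHead is illustrative, not part of MMEngine):
import torch.nn as nn

@MODELS.register_module()
class TinyHead(nn.Module):
    def __init__(self, in_channels: int, num_classes: int):
        super().__init__()
        self.fc = nn.Linear(in_channels, num_classes)

    def forward(self, x):
        return self.fc(x)

# build_model_from_cfg instantiates the registered class from a config dict
head = MODELS.build(dict(type='TinyHead', in_channels=16, num_classes=3))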
|
import hashlib
import json
from typing import Tuple, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class FeatureHashMixin:
"""Provide helper functions for feature hashing."""
def embed_feature_hashing(
self: 'T',
n_dim: int = 256,
sparse: bool = False,
fields: Tuple[str, ...] = ('text', 'tags'),
max_value: int = 1_000_000,
) -> 'T':
"""Convert an arbitrary set of attributes into a fixed-dimensional matrix using the hashing trick.
:param n_dim: the dimensionality of each document in the output embedding.
Small numbers of features are likely to cause hash collisions,
but large numbers will cause larger overall parameter dimensions.
:param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
Note that this feature requires ``scipy``
:param fields: which attributes are to be considered for feature hashing.
"""
if sparse:
from scipy.sparse import csr_matrix
idxs, data = [], [] # sparse
table = np.zeros(n_dim) # dense
for f in fields:
if 'text' in fields:
all_tokens = self.get_vocabulary(('text',))
for f_id, val in all_tokens.items():
_hash_column(f_id, val, n_dim, max_value, idxs, data, table)
if 'tags' in fields:
for k, v in self.tags.items():
_hash_column(k, v, n_dim, max_value, idxs, data, table)
v = getattr(self, f, None)
if v:
_hash_column(f, v, n_dim, max_value, idxs, data, table)
if sparse:
self.embedding = csr_matrix((data, zip(*idxs)), shape=(1, n_dim))
else:
self.embedding = table
return self
def _hash_column(col_name, col_val, n_dim, max_value, idxs, data, table):
h = _any_hash(col_name)
col_val = _any_hash(col_val) % max_value
col = h % n_dim
idxs.append((0, col))
data.append(np.sign(h) * col_val)
table[col] += np.sign(h) * col_val
def _any_hash(v):
if not v:
# ignore it when the parameter is empty
return 0
elif isinstance(v, (tuple, dict, list, str)):
if isinstance(v, str):
v = v.strip()
if v.lower() in {'true', 'yes'}: # parse boolean parameter
return 1
if v.lower() in {'false', 'no'}:
return 0
else:
v = json.dumps(v, sort_keys=True)
return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)
else:
try:
return int(v) # parse int parameter
except ValueError:
try:
return float(v) # parse float parameter
except ValueError:
return 0 # unable to hash
|
import hashlib
import json
from typing import Tuple, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class FeatureHashMixin:
"""Provide helper functions for feature hashing."""
def embed_feature_hashing(
self: 'T',
n_dim: int = 256,
sparse: bool = False,
fields: Tuple[str, ...] = ('text', 'tags'),
max_value: int = 1_000_000,
) -> 'T':
"""Convert an arbitrary set of attributes into a fixed-dimensional matrix using the hashing trick.
:param n_dim: the dimensionality of each document in the output embedding.
Small numbers of features are likely to cause hash collisions,
but large numbers will cause larger overall parameter dimensions.
:param sparse: whether the resulting feature matrix should be a sparse csr_matrix or dense ndarray.
Note that this feature requires ``scipy``
:param fields: which attributes are to be considered for feature hashing.
"""
if sparse:
from scipy.sparse import csr_matrix
idxs, data = [], [] # sparse
table = np.zeros(n_dim) # dense
for f in fields:
if 'text' in fields:
all_tokens = self.get_vocabulary(('text',))
for f_id, val in all_tokens.items():
_hash_column(f_id, val, n_dim, max_value, idxs, data, table)
if 'tags' in fields:
for k, v in self.tags.items():
_hash_column(k, v, n_dim, max_value, idxs, data, table)
v = getattr(self, f, None)
if v:
_hash_column(f, v, n_dim, max_value, idxs, data, table)
if sparse:
self.embedding = csr_matrix((data, zip(*idxs)), shape=(1, n_dim))
else:
self.embedding = table
return self
def _hash_column(col_name, col_val, n_dim, max_value, idxs, data, table):
h = _any_hash(col_name)
col_val = _any_hash(col_val) % max_value
col = h % n_dim
idxs.append((0, col))
data.append(np.sign(h) * col_val)
table[col] += np.sign(h) * col_val
def _any_hash(v):
if not v:
# ignore it when the parameter is empty
return 0
elif isinstance(v, (tuple, dict, list, str)):
if isinstance(v, str):
v = v.strip()
if v.lower() in {'true', 'yes'}: # parse boolean parameter
return 1
if v.lower() in {'false', 'no'}:
return 0
else:
v = json.dumps(v, sort_keys=True)
return int(hashlib.md5(str(v).encode('utf-8')).hexdigest(), base=16)
else:
try:
return int(v) # parse int parameter
except ValueError:
try:
return float(v) # parse float parameter
except ValueError:
return 0 # unable to hash
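The mixin is an application of the classic hashing trick: the hash of a feature name picks a bucket in a fixed-width table, so the embedding size stays at n_dim no matter how many distinct fields or tokens occur. A self-contained toy version of that idea, independent of the docarray classes above:
import hashlib
import numpy as np

def bucket(name: str, n_dim: int) -> int:
    # stable hash of the feature name -> column index, mirroring _hash_column above
    return int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16) % n_dim

n_dim = 8
table = np.zeros(n_dim)
for name, value in [('views', 42.0), ('likes', 7.0), ('score', 0.5)]:
    table[bucket(name, n_dim)] += value
print(table)  # fixed-size embedding regardless of the number of features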
|