input | output |
---|---|
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
Generation,
GenerationChunk,
LLMResult,
RunInfo,
)
__all__ = [
"ChatGeneration",
"ChatGenerationChunk",
"ChatResult",
"Generation",
"GenerationChunk",
"LLMResult",
"RunInfo",
]
|
from langchain_core.outputs import (
ChatGeneration,
ChatGenerationChunk,
ChatResult,
Generation,
GenerationChunk,
LLMResult,
RunInfo,
)
__all__ = [
"Generation",
"GenerationChunk",
"ChatGeneration",
"ChatGenerationChunk",
"RunInfo",
"ChatResult",
"LLMResult",
]
|
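Both cells export the same seven names; only the ordering inside `__all__` differs. A quick sanity check (assuming `langchain_core` is installed) confirms every name in `__all__` resolves to a real attribute:

# Sanity check for a re-export module: every name listed in __all__
# must resolve to an attribute of the module (assumes langchain_core is installed).
import langchain_core.outputs as outputs

for name in outputs.__all__:
    assert hasattr(outputs, name), f"missing export: {name}"
print(sorted(outputs.__all__))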
from .common_utils import _get_id2label, _get_label2id, create_tsv
from .feature_utils import dump_features
from .kmeans import get_km_label, learn_kmeans
__all__ = [
"create_tsv",
"_get_id2label",
"_get_label2id",
"dump_features",
"learn_kmeans",
"get_km_label",
]
|
from .common_utils import create_tsv
from .feature_utils import dump_features
from .kmeans import get_km_label, learn_kmeans
__all__ = [
"create_tsv",
"dump_features",
"learn_kmeans",
"get_km_label",
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar10 import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar10 import load_data
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned for 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
import csv
import os
import pickle
import time
from sentence_transformers import SentenceTransformer, util
model_name = "quora-distilbert-multilingual"
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
# Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model.device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] # Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This script contains an example of how to perform semantic search with PyTorch. It performs exact nearest neighbor search.
As dataset, we use the Quora Duplicate Questions dataset, which contains about 500k questions (we only use about 100k):
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
As embedding model, we use the SBERT model 'quora-distilbert-multilingual',
which is aligned for 100 languages. I.e., you can type in a question in various languages and it will
return the closest questions in the corpus (questions in the corpus are mainly in English).
Google Colab example: https://colab.research.google.com/drive/12cn5Oo0v3HfQQ8Tv6-ukgxXSmT3zl35A?usp=sharing
"""
from sentence_transformers import SentenceTransformer, util
import os
import csv
import pickle
import time
model_name = 'quora-distilbert-multilingual'
model = SentenceTransformer(model_name)
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 100000
embedding_cache_path = 'quora-embeddings-{}-size-{}.pkl'.format(model_name.replace('/', '_'), max_corpus_size)
#Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row['question1'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row['question2'])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({'sentences': corpus_sentences, 'embeddings': corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data['sentences'][0:max_corpus_size]
corpus_embeddings = cache_data['embeddings'][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
#Move embeddings to the target device of the model
corpus_embeddings = corpus_embeddings.to(model._target_device)
while True:
inp_question = input("Please enter a question: ")
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings)
end_time = time.time()
hits = hits[0] #Get the hits for the first query
print("Input question:", inp_question)
print("Results (after {:.3f} seconds):".format(end_time-start_time))
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit['score'], corpus_sentences[hit['corpus_id']]))
print("\n\n========\n")
|
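The two cells differ in import order, quote style, and the device attribute (`model.device` vs. the older private `model._target_device`). The search itself is `util.semantic_search`; a minimal sketch with synthetic embeddings (no download or model needed) shows the shape of its output:

import torch
from sentence_transformers import util

# Synthetic stand-ins for model.encode(...) results: 1 query, 100 corpus entries, dim 384.
query_embedding = torch.randn(1, 384)
corpus_embeddings = torch.randn(100, 384)

hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
# hits has one entry per query; each entry is a list of
# {"corpus_id": int, "score": float} dicts sorted by decreasing cosine score.
for hit in hits[0]:
    print(hit["corpus_id"], round(hit["score"], 3))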
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
|
import inspect
import re
from hashlib import sha256
from typing import List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
|
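Given `_EXTENSION_TO_MODULE`, resolving the builder for a data file is a plain suffix lookup. A hypothetical helper (not part of the `datasets` library) sketches that resolution:

from pathlib import Path
from typing import Dict, Tuple

# Hypothetical helper (not in the datasets library) showing how a table like
# _EXTENSION_TO_MODULE maps a file name to (module_name, default_builder_kwargs).
def infer_module(filename: str, extension_to_module: Dict[str, Tuple[str, dict]]) -> Tuple[str, dict]:
    suffix = Path(filename).suffix
    if suffix in extension_to_module:
        return extension_to_module[suffix]
    raise ValueError(f"no packaged module for extension {suffix!r}")

table = {".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"})}
print(infer_module("data/train.tsv", table))  # ('csv', {'sep': '\t'})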
# pants requires this import to recognize the dep
import pytest_asyncio # noqa: F401
import pytest
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import StartEvent, StopEvent, Event
from llama_index.core.bridge.pydantic import Field
class OneTestEvent(Event):
test_param: str = Field(default="test")
class AnotherTestEvent(Event):
another_test_param: str = Field(default="another_test")
class LastEvent(Event):
pass
class DummyWorkflow(Workflow):
@step()
async def start_step(self, ev: StartEvent) -> OneTestEvent:
return OneTestEvent()
@step()
async def middle_step(self, ev: OneTestEvent) -> LastEvent:
return LastEvent()
@step()
async def end_step(self, ev: LastEvent) -> StopEvent:
return StopEvent(result="Workflow completed")
@pytest.fixture()
def workflow():
return DummyWorkflow()
@pytest.fixture()
def events():
return [OneTestEvent, AnotherTestEvent]
|
import pytest
from llama_index.core.workflow.workflow import Workflow
from llama_index.core.workflow.decorators import step
from llama_index.core.workflow.events import StartEvent, StopEvent, Event
from llama_index.core.bridge.pydantic import Field
class OneTestEvent(Event):
test_param: str = Field(default="test")
class AnotherTestEvent(Event):
another_test_param: str = Field(default="another_test")
class LastEvent(Event):
pass
class DummyWorkflow(Workflow):
@step()
async def start_step(self, ev: StartEvent) -> OneTestEvent:
return OneTestEvent()
@step()
async def middle_step(self, ev: OneTestEvent) -> LastEvent:
return LastEvent()
@step()
async def end_step(self, ev: LastEvent) -> StopEvent:
return StopEvent(result="Workflow completed")
@pytest.fixture()
def workflow():
return DummyWorkflow()
@pytest.fixture()
def events():
return [OneTestEvent, AnotherTestEvent]
|
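The cells differ only by the `pytest_asyncio` import kept for the pants build system. For context, the `DummyWorkflow` chains its three steps purely by event type; a sketch of driving it (assuming the standard `Workflow.run()` entry point):

import asyncio

# Assumes the DummyWorkflow defined above: run() emits a StartEvent and
# resolves once a step returns a StopEvent.
async def main():
    result = await DummyWorkflow(timeout=10).run()
    print(result)  # "Workflow completed"

asyncio.run(main())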
import random
from collections import defaultdict
from typing import Dict, Any, TYPE_CHECKING, Generator, List
import numpy as np
from docarray.helper import dunder_get
if TYPE_CHECKING: # pragma: no cover
from docarray import DocumentArray
class GroupMixin:
"""These helpers yield groups of :class:`DocumentArray` from
a source :class:`DocumentArray`."""
def split_by_tag(self, tag: str) -> Dict[Any, 'DocumentArray']:
"""Split the `DocumentArray` into multiple DocumentArray according to the tag value of each `Document`.
:param tag: the tag name to split stored in tags.
:return: a dict where Documents with the same value on `tag` are grouped together, their orders
are preserved from the original :class:`DocumentArray`.
.. note::
If the :attr:`tags` of :class:`Document` do not contains the specified :attr:`tag`,
return an empty dict.
"""
from docarray import DocumentArray
rv = defaultdict(DocumentArray)
for doc in self:
if '__' in tag:
value = dunder_get(doc.tags, tag)
elif tag in doc.tags:
value = doc.tags[tag]
else:
continue
rv[value].append(doc)
return dict(rv)
def batch(
self,
batch_size: int,
shuffle: bool = False,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""
        Creates a `Generator` that yields `DocumentArray`s of size `batch_size` until `self` is fully traversed.
        Note that the last batch might be smaller than `batch_size`.
        :param batch_size: Size of each generated batch (except the last one, which might be smaller)
        :param shuffle: If set, shuffle the Documents before dividing into minibatches.
        :param show_progress: if set, show a progress bar when batching documents.
        :yield: a Generator of `DocumentArray`, each of length `batch_size`
"""
from rich.progress import track
if not (isinstance(batch_size, int) and batch_size > 0):
raise ValueError('`batch_size` should be a positive integer')
N = len(self)
ix = list(range(N))
n_batches = int(np.ceil(N / batch_size))
if shuffle:
random.shuffle(ix)
for i in track(
range(n_batches),
description='Batching documents',
disable=not show_progress,
):
yield self[ix[i * batch_size : (i + 1) * batch_size]]
def batch_ids(
self,
batch_size: int,
shuffle: bool = False,
) -> Generator[List[str], None, None]:
"""
        Creates a `Generator` that yields `lists of ids` of size `batch_size` until `self` is fully traversed.
        Note that the last batch might be smaller than `batch_size`.
        :param batch_size: Size of each generated batch (except the last one, which might be smaller)
        :param shuffle: If set, shuffle the Documents before dividing into minibatches.
        :yield: a Generator of `list` of IDs, each of length `batch_size`
"""
if not (isinstance(batch_size, int) and batch_size > 0):
raise ValueError('`batch_size` should be a positive integer')
N = len(self)
ix = self[:, 'id']
n_batches = int(np.ceil(N / batch_size))
if shuffle:
random.shuffle(ix)
for i in range(n_batches):
yield ix[i * batch_size : (i + 1) * batch_size]
|
import random
from collections import defaultdict
from typing import Dict, Any, TYPE_CHECKING, Generator, List
import numpy as np
from docarray.helper import dunder_get
if TYPE_CHECKING:
from docarray import DocumentArray
class GroupMixin:
"""These helpers yield groups of :class:`DocumentArray` from
a source :class:`DocumentArray`."""
def split_by_tag(self, tag: str) -> Dict[Any, 'DocumentArray']:
"""Split the `DocumentArray` into multiple DocumentArray according to the tag value of each `Document`.
:param tag: the tag name to split stored in tags.
:return: a dict where Documents with the same value on `tag` are grouped together, their orders
are preserved from the original :class:`DocumentArray`.
.. note::
If the :attr:`tags` of :class:`Document` do not contains the specified :attr:`tag`,
return an empty dict.
"""
from docarray import DocumentArray
rv = defaultdict(DocumentArray)
for doc in self:
if '__' in tag:
value = dunder_get(doc.tags, tag)
elif tag in doc.tags:
value = doc.tags[tag]
else:
continue
rv[value].append(doc)
return dict(rv)
def batch(
self,
batch_size: int,
shuffle: bool = False,
show_progress: bool = False,
) -> Generator['DocumentArray', None, None]:
"""
        Creates a `Generator` that yields `DocumentArray`s of size `batch_size` until `self` is fully traversed.
        Note that the last batch might be smaller than `batch_size`.
        :param batch_size: Size of each generated batch (except the last one, which might be smaller)
        :param shuffle: If set, shuffle the Documents before dividing into minibatches.
        :param show_progress: if set, show a progress bar when batching documents.
        :yield: a Generator of `DocumentArray`, each of length `batch_size`
"""
from rich.progress import track
if not (isinstance(batch_size, int) and batch_size > 0):
raise ValueError('`batch_size` should be a positive integer')
N = len(self)
ix = list(range(N))
n_batches = int(np.ceil(N / batch_size))
if shuffle:
random.shuffle(ix)
for i in track(
range(n_batches),
description='Batching documents',
disable=not show_progress,
):
yield self[ix[i * batch_size : (i + 1) * batch_size]]
def batch_ids(
self,
batch_size: int,
shuffle: bool = False,
) -> Generator[List[str], None, None]:
"""
        Creates a `Generator` that yields `lists of ids` of size `batch_size` until `self` is fully traversed.
        Note that the last batch might be smaller than `batch_size`.
        :param batch_size: Size of each generated batch (except the last one, which might be smaller)
        :param shuffle: If set, shuffle the Documents before dividing into minibatches.
        :yield: a Generator of `list` of IDs, each of length `batch_size`
"""
if not (isinstance(batch_size, int) and batch_size > 0):
raise ValueError('`batch_size` should be a positive integer')
N = len(self)
ix = self[:, 'id']
n_batches = int(np.ceil(N / batch_size))
if shuffle:
random.shuffle(ix)
for i in range(n_batches):
yield ix[i * batch_size : (i + 1) * batch_size]
|
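The diff here only drops the `# pragma: no cover` marker on the TYPE_CHECKING guard. A short usage sketch of the two mixin methods (assumes a docarray version where `DocumentArray` still carries this API):

from docarray import Document, DocumentArray

da = DocumentArray([Document(text=str(i), tags={"parity": i % 2}) for i in range(10)])

groups = da.split_by_tag("parity")        # one DocumentArray per distinct tag value
print({k: len(v) for k, v in groups.items()})

for minibatch in da.batch(batch_size=4):  # last batch may be smaller
    print(len(minibatch))                 # 4, 4, 2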
import csv
import logging
import os
from typing import List
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
    This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
import logging
from sklearn.metrics import average_precision_score
from typing import List
import numpy as np
import os
import csv
from ... import InputExample
from ...evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
    This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
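The cells differ in absolute vs. relative (`...`) imports. Typical usage pairs the evaluator with a `CrossEncoder`; a minimal sketch (the model name and toy pairs are placeholders):

from sentence_transformers import CrossEncoder, InputExample
from sentence_transformers.cross_encoder.evaluation import CEBinaryClassificationEvaluator

# Placeholder model and toy binary-labeled pairs, for illustration only.
model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
examples = [
    InputExample(texts=["A man is eating.", "Someone eats."], label=1),
    InputExample(texts=["A man is eating.", "A plane is landing."], label=0),
]
evaluator = CEBinaryClassificationEvaluator.from_input_examples(examples, name="toy")
ap = evaluator(model)  # returns the average precision
print(ap)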
from ._label import Label, OneHotLabel
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
|
"""GraphQL Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphQLReader(BaseReader):
"""
GraphQL reader.
Combines all GraphQL results into the Document used by LlamaIndex.
Args:
uri (str): GraphQL uri.
headers (Optional[Dict]): Optional http headers.
"""
def __init__(
self,
uri: Optional[str] = None,
headers: Optional[Dict] = None,
) -> None:
"""Initialize with parameters."""
try:
from gql import Client
from gql.transport.requests import RequestsHTTPTransport
except ImportError:
raise ImportError("`gql` package not found, please run `pip install gql`")
        if uri is None:
            raise ValueError("`uri` must be provided.")
        if headers is None:
            headers = {}
        transport = RequestsHTTPTransport(url=uri, headers=headers)
        self.client = Client(transport=transport, fetch_schema_from_transport=True)
def load_data(self, query: str, variables: Optional[Dict] = None) -> List[Document]:
"""
Run query with optional variables and turn results into documents.
Args:
query (str): GraphQL query string.
variables (Optional[Dict]): optional query parameters.
Returns:
List[Document]: A list of documents.
"""
try:
from gql import gql
except ImportError:
raise ImportError("`gql` package not found, please run `pip install gql`")
if variables is None:
variables = {}
documents = []
result = self.client.execute(gql(query), variable_values=variables)
for key in result:
entry = result[key]
if isinstance(entry, list):
documents.extend([Document(text=yaml.dump(v)) for v in entry])
else:
documents.append(Document(text=yaml.dump(entry)))
return documents
|
"""GraphQL Reader."""
from typing import Dict, List, Optional
import yaml
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class GraphQLReader(BaseReader):
"""GraphQL reader.
Combines all GraphQL results into the Document used by LlamaIndex.
Args:
uri (str): GraphQL uri.
headers (Optional[Dict]): Optional http headers.
"""
def __init__(
self,
uri: Optional[str] = None,
headers: Optional[Dict] = None,
) -> None:
"""Initialize with parameters."""
try:
from gql import Client
from gql.transport.requests import RequestsHTTPTransport
except ImportError:
raise ImportError("`gql` package not found, please run `pip install gql`")
        if uri is None:
            raise ValueError("`uri` must be provided.")
        if headers is None:
            headers = {}
        transport = RequestsHTTPTransport(url=uri, headers=headers)
        self.client = Client(transport=transport, fetch_schema_from_transport=True)
def load_data(self, query: str, variables: Optional[Dict] = None) -> List[Document]:
"""Run query with optional variables and turn results into documents.
Args:
query (str): GraphQL query string.
variables (Optional[Dict]): optional query parameters.
Returns:
List[Document]: A list of documents.
"""
try:
from gql import gql
except ImportError:
raise ImportError("`gql` package not found, please run `pip install gql`")
if variables is None:
variables = {}
documents = []
result = self.client.execute(gql(query), variable_values=variables)
for key in result:
entry = result[key]
if isinstance(entry, list):
documents.extend([Document(text=yaml.dump(v)) for v in entry])
else:
documents.append(Document(text=yaml.dump(entry)))
return documents
|
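Apart from docstring layout, the two cells are identical. A usage sketch (the endpoint and query are placeholders; any public GraphQL API would do):

# Placeholder endpoint and query, for illustration only.
reader = GraphQLReader(uri="https://countries.trevorblades.com/", headers={})
documents = reader.load_data(query="{ countries { code name } }", variables={})
print(documents[0].text)  # YAML dump of the first result entry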
from typing import Dict, List, Tuple
import pytest
from llama_index.core.schema import Document
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\nThis is a test.\nThis is another test.\nThis is a test v2."
)
return [Document(text=doc_text)]
@pytest.fixture()
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
query_kwargs = {
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
|
from typing import Dict, List, Tuple
import pytest
from llama_index.core.schema import Document
from tests.mock_utils.mock_prompts import (
MOCK_INSERT_PROMPT,
MOCK_QUERY_PROMPT,
MOCK_REFINE_PROMPT,
MOCK_SUMMARY_PROMPT,
MOCK_TEXT_QA_PROMPT,
)
@pytest.fixture()
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(text=doc_text)]
@pytest.fixture()
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"summary_template": MOCK_SUMMARY_PROMPT,
"insert_prompt": MOCK_INSERT_PROMPT,
"num_children": 2,
}
query_kwargs = {
"query_template": MOCK_QUERY_PROMPT,
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
}
return index_kwargs, query_kwargs
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: SentenceTransformer, metrics: dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
"""
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: "SentenceTransformer", output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
metrics = {name + "_" + key: value for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(self, model: "SentenceTransformer", metrics: dict[str, Any]) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Remove "Evaluator" from the class name
2. Add a space before every capital letter
"""
class_name = self.__class__.__name__
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError when "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
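The two cells differ only in whether the `SentenceTransformer` annotations are quoted (string annotations are redundant once `from __future__ import annotations` is in effect). The `description` property's regex step can be checked standalone:

import re

# Standalone check of the CamelCase-spacing step used by `description`.
def spaced(class_name: str) -> str:
    if "Evaluator" in class_name:
        class_name = class_name[: class_name.index("Evaluator")]
    return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)

print(spaced("BinaryClassificationEvaluator"))  # Binary Classification
print(spaced("TranslationEvaluator"))           # Translation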
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Mock logging to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert (
get_base_config().graphistry_password == "password"
), "Password was not set properly"
assert (
get_base_config().graphistry_username == "username"
), "Username was not set properly"
if __name__ == "__main__":
    # Supply a MonkeyPatch instance explicitly when running outside pytest.
    asyncio.run(test_get_graph_url(pytest.MonkeyPatch()))
|
import asyncio
import pytest
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.asyncio()
async def test_get_graph_url(monkeypatch):
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key="",
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
db_name="cognee_db",
)
# Mock logging to graphistry
def mock_graphistry_return(username, password):
return True
import graphistry
monkeypatch.setattr(graphistry, "login", mock_graphistry_return)
# Mock render of graph
async def mock_render_return(graph):
return "link"
from cognee.shared import utils
monkeypatch.setattr(utils, "render_graph", mock_render_return)
await cogneeRAG.get_graph_url("password", "username")
from cognee.base_config import get_base_config
assert (
get_base_config().graphistry_password == "password"
), "Password was not set properly"
assert (
get_base_config().graphistry_username == "username"
), "Username was not set properly"
if __name__ == "__main__":
    # Supply a MonkeyPatch instance explicitly when running outside pytest.
    asyncio.run(test_get_graph_url(pytest.MonkeyPatch()))
|
import os
import pickle
from typing import Optional, Iterable, Tuple
from jina import Executor, requests, DocumentArray
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
    :param default_traversal_paths: fallback traversal path in case no traversal path is sent in the request
    :param default_batch_size: fallback batch size in case no batch size is sent in the request
"""
def __init__(
self,
path_vectorizer: str = 'model/tfidf_vectorizer.pickle',
default_batch_size: int = 2048,
default_traversal_paths: Tuple[str] = ('r', ),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.path_vectorizer = path_vectorizer
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs):
"""
Generate the TF-IDF feature vector and store it in `doc.embedding` for each `doc` in `docs`.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text'
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
"""Update the documents with the embeddings generated by a tfidf"""
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
import os
import pickle
from typing import Optional, Iterable, Any, List, Tuple
from jina import Executor, requests, DocumentArray
from jina.excepts import PretrainedModelFileDoesNotExist
from jina_commons.batching import get_docs_batch_generator
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
:param path_vectorizer: path of the pre-trained tfidf sklearn vectorizer
    :param default_traversal_paths: fallback traversal path in case no traversal path is sent in the request
    :param default_batch_size: fallback batch size in case no batch size is sent in the request
"""
def __init__(
self,
path_vectorizer: str = 'model/tfidf_vectorizer.pickle',
default_batch_size: int = 2048,
default_traversal_paths: Tuple[str] = ('r', ),
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.path_vectorizer = path_vectorizer
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
if os.path.exists(self.path_vectorizer):
            with open(self.path_vectorizer, 'rb') as f:
                self.tfidf_vectorizer = pickle.load(f)
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs):
"""
Generate the TF-IDF feature vector and store it in `doc.embedding` for each `doc` in `docs`.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text'
)
self._create_embeddings(document_batches_generator)
def _create_embeddings(self, document_batches_generator: Iterable):
"""Update the documents with the embeddings generated by a tfidf"""
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
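The cells differ only in unused typing imports. The executor refuses to start without a fitted vectorizer pickled at `path_vectorizer`; a sketch of producing that file with scikit-learn (the corpus is a placeholder):

import os
import pickle

from sklearn.feature_extraction.text import TfidfVectorizer

# Fit on a placeholder corpus and pickle to the path the executor expects.
corpus = ["hello world", "sparse tf-idf features", "hello executor"]
vectorizer = TfidfVectorizer().fit(corpus)

os.makedirs("model", exist_ok=True)
with open("model/tfidf_vectorizer.pickle", "wb") as f:
    pickle.dump(vectorizer, f)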
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: list[str] # definition at the bottom of the script
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
from typing import List
import datasets
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = ["add_noise", "barkscale_fbanks", "convolve", "fftconvolve"]
|
from .functional import add_noise, convolve, fftconvolve
__all__ = ["add_noise", "convolve", "fftconvolve"]
|
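The diff removes `barkscale_fbanks` from the re-exports. The remaining ops convolve along the trailing time dimension; a sketch (assuming a torchaudio release that ships `convolve`/`fftconvolve` in `torchaudio.functional`):

import torch
import torchaudio.functional as F

x = torch.randn(1, 16000)  # (channel, time)
h = torch.randn(1, 101)    # filter taps

y_direct = F.convolve(x, h)   # direct convolution, length 16000 + 101 - 1
y_fft = F.fftconvolve(x, h)   # FFT-based, equal up to numerical error
print(y_direct.shape, torch.allclose(y_direct, y_fft, atol=1e-4))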
import pytest
from huggingface_hub import snapshot_download
@pytest.fixture
def dataset_dir(tmp_path):
dataset_dir = tmp_path / "test_command_dataset_dir"
snapshot_download("hf-internal-testing/ner-jsonl", repo_type="dataset", local_dir=dataset_dir)
return str(dataset_dir)
|
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"
DATASET_LOADING_SCRIPT_CODE = """
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name():
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
script_name = dataset_loading_script_name
script_dir = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=True)
script_path = script_dir / f"{script_name}.py"
with open(script_path, "w") as f:
f.write(dataset_loading_script_code)
return str(script_dir)
|
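One cell downloads a ready-made dataset directory; the other writes a loading script to disk. Both fixtures end in a path that `datasets.load_dataset` can consume; a sketch (paths are placeholders, and script-based loading assumes a `datasets` version that still supports loading scripts):

from datasets import load_dataset

# Placeholder paths standing in for the fixture return values above.
ds = load_dataset("path/to/test_command_dataset_dir")  # folder of .jsonl data files
# For the loading-script fixture (older datasets versions):
# ds = load_dataset("path/to/datasets/__dummy_dataset1__", trust_remote_code=True)
print(ds)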
"""Init params."""
from llama_index.finetuning.openai.base import OpenAIFinetuneEngine
__all__ = ["OpenAIFinetuneEngine"]
|
"""Init params."""
from llama_index.finetuning.openai.base import OpenAIFinetuneEngine
__all__ = ["OpenAIFinetuneEngine"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
try:
import torch_dipu # noqa: F401
IS_DIPU_AVAILABLE = True
except Exception:
IS_DIPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_dipu_available() -> bool:
return IS_DIPU_AVAILABLE
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
elif is_dipu_available():
DEVICE = 'dipu'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
import torch_npu.npu.utils as npu_utils
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def is_npu_support_full_precision() -> bool:
"""Returns True if npu devices support full precision training."""
version_of_support_full_precision = 220
return IS_NPU_AVAILABLE and npu_utils.get_soc_version(
) >= version_of_support_full_precision
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
|
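The second cell drops the DIPU branch. `get_device` simply returns the backend string resolved once at import time; a usage sketch (a non-CPU string implies the matching backend extension is installed):

import torch

# get_device() returns 'npu' | 'cuda' | 'mlu' | 'mps' | 'cpu' (plus 'dipu'
# in the older variant), resolved when the module is imported.
device = get_device()
x = torch.ones(2, 2, device=device)
print(device, x.device)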
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
Expectations,
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
expected_texts = Expectations(
{
("rocm", (9, 5)): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I"],
(None, None): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"],
("cuda", 8): ['Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I'],
}
) # fmt: skip
EXPECTED_TEXTS = expected_texts.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
Expectations,
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
expected_texts = Expectations(
{
("rocm", (9, 5)): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now, and I"],
("cuda", None): ["Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"],
}
) # fmt: skip
EXPECTED_TEXTS = expected_texts.get_expectation()
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
|
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
optimizer = dict(lr=0.01)
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def shutdown(self):
self.server.should_exit = True
await self.server.shutdown()
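# Illustrative usage sketch. The Flow API shown below is an assumption based on
# jina's custom-gateway support, not something taken from this file:
#
#   from jina import Flow
#
#   with Flow().config_gateway(uses=DummyGateway, protocol='http') as f:
#       ...  # GET / echoes arg1/arg2/arg3; GET /stream?text=... runs the Flow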
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
@property
def should_exit(self) -> bool:
return self.server.should_exit
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.structures.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
def construct_toy_data(poly2mask):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
results['img'] = img
results['img_shape'] = img.shape[:2]
results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
if poly2mask:
gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
else:
raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
results['gt_ignore_flags'] = np.array([1], dtype=bool)
results['gt_seg_map'] = np.array(
[[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
dtype=np.uint8)
return results
def check_result_same(results, pipeline_results, check_keys):
"""Check whether the ``pipeline_results`` is the same with the predefined
``results``.
Args:
results (dict): Predefined results which should be the standard
output of the transform pipeline.
pipeline_results (dict): Results processed by the transform
pipeline.
check_keys (tuple): Keys that need to be checked between
results and pipeline_results.
"""
for key in check_keys:
if results.get(key, None) is None:
continue
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert_allclose(pipeline_results[key].to_ndarray(),
results[key].to_ndarray())
else:
assert_allclose(pipeline_results[key], results[key])
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmengine.testing import assert_allclose
from mmdet.data_elements.mask import BitmapMasks, PolygonMasks
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
def construct_toy_data(poly2mask):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
results['img'] = img
results['img_shape'] = img.shape[:2]
results['gt_bboxes'] = np.array([[1, 0, 2, 2]], dtype=np.float32)
results['gt_bboxes_labels'] = np.array([13], dtype=np.int64)
if poly2mask:
gt_masks = np.array([[0, 1, 0, 0], [0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 3, 4)
else:
raw_masks = [[np.array([1, 2, 1, 0, 2, 1], dtype=np.float32)]]
results['gt_masks'] = PolygonMasks(raw_masks, 3, 4)
results['gt_ignore_flags'] = np.array([1], dtype=bool)
results['gt_seg_map'] = np.array(
[[255, 13, 255, 255], [255, 13, 13, 255], [255, 13, 255, 255]],
dtype=np.uint8)
return results
def check_result_same(results, pipeline_results, check_keys):
"""Check whether the ``pipeline_results`` is the same with the predefined
``results``.
Args:
results (dict): Predefined results which should be the standard
output of the transform pipeline.
pipeline_results (dict): Results processed by the transform
pipeline.
check_keys (tuple): Keys that need to be checked between
results and pipeline_results.
"""
for key in check_keys:
if results.get(key, None) is None:
continue
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert_allclose(pipeline_results[key].to_ndarray(),
results[key].to_ndarray())
else:
assert_allclose(pipeline_results[key], results[key])
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmdet.registry import MODELS
from .anchor_head import AnchorHead
@MODELS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
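# Shape sketch (illustrative): for each of the num_ins input feature maps of
# shape (N, feat_channels, H_i, W_i), forward() returns cls_scores[i] of shape
# (N, num_base_priors * cls_out_channels, H_i, W_i) and bbox_preds[i] of shape
# (N, num_base_priors * 4, H_i, W_i).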
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification scores for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for all scale
levels, each is a 4D-tensor, the channels number is
num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_big_gpu_with_torch_cuda,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors"
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
@require_big_gpu_with_torch_cuda
@require_torch_accelerator
class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors"
repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
torch_dtype = torch.float8_e4m3fn
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype)
model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_big_gpu_with_torch_cuda,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors"
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
@require_big_gpu_with_torch_cuda
@require_torch_accelerator
class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors"
repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
torch_dtype = torch.float8_e4m3fn
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype)
model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string naming the primary metric for the evaluator. It has to be defined whenever
the evaluator returns a dictionary of metrics, in which case it is the key pointing to the main
score, i.e. the one used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined.
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except (ValueError, TypeError):  # non-numeric values are kept as-is
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except ValueError:  # "Evaluator" does not occur in the class name
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
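# Behavior sketch for prefix_name_to_metrics (hypothetical values):
#
#   evaluator.primary_metric = "accuracy"
#   evaluator.prefix_name_to_metrics({"accuracy": "0.91"}, "dev")
#   # -> {"dev_accuracy": 0.91}, and evaluator.primary_metric == "dev_accuracy"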
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string naming the primary metric for the evaluator. It has to be defined whenever
the evaluator returns a dictionary of metrics, in which case it is the key pointing to the main
score, i.e. the one used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
self, model: SentenceTransformer, output_path: str = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined.
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except (ValueError, TypeError):  # non-numeric values are kept as-is
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
except ValueError:  # "Evaluator" does not occur in the class name
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if the incoming data is already a Document, it can be used as-is (best practice)
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
if not docarray_v2:
da = DocumentArray([])
else:
da = DocList[batch[0].__class__]()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.document_array_cls = da.__class__
req.data.docs = da
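# Dispatch sketch (illustrative; the exact Document flavor depends on whether
# docarray v2 is active):
#
#   _new_doc_from_data(Document(text='hi'), DataInputType.AUTO)
#   # -> (that same Document, DataInputType.DOCUMENT)
#   _new_doc_from_data({'text': 'hi'}, DataInputType.DICT)
#   # -> (a Document built from the dict, DataInputType.DICT)
#   _new_doc_from_data('raw text', DataInputType.CONTENT)
#   # -> (Document(content='raw text'), DataInputType.CONTENT)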
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if the incoming data is already a Document, it can be used as-is (best practice)
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
da = DocumentArray([])
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.data.docs = da
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_help_non_exist():
s = subprocess.check_output(
['jina', 'help', 'abcdefg'],
stderr=subprocess.STDOUT,
)
assert 'misspelling' in s.decode()
def test_help_exist():
s = subprocess.check_output(
['jina', 'help', 'port'],
stderr=subprocess.STDOUT,
)
assert 'a CLI argument of Jina' in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = set_pod_parser().parse_args([])
a2 = set_ping_parser().parse_args(['executor', f'0.0.0.0:{a1.port}'])
a3 = set_ping_parser().parse_args(
['executor', f'0.0.0.1:{a1.port}', '--timeout', '1000']
)
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
|
import json
import os
import subprocess
import pytest
from jina.checker import NetworkChecker
from jina.jaml import JAML
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_deployment_parser, set_pod_parser
from jina.parsers.ping import set_ping_parser
from jina_cli.autocomplete import ac_table
from jina_cli.export import api_to_dict
from jina_cli.lookup import _build_lookup_table, lookup_and_print
def test_export_api(tmpdir):
with open(tmpdir / 'test.yml', 'w', encoding='utf8') as fp:
JAML.dump(api_to_dict(), fp)
with open(tmpdir / 'test.json', 'w', encoding='utf8') as fp:
json.dump(api_to_dict(), fp)
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_help_lookup(cli, capsys):
nkw2kw, kw2info = _build_lookup_table()
if cli not in {'--help', '--version', '--version-full'}:
assert cli in nkw2kw
lookup_and_print(cli)
captured = capsys.readouterr()
assert 'Traceback (most recent call last)' not in captured.out
def test_main_cli():
subprocess.check_call(['jina'])
def test_cli_help():
subprocess.check_call(['jina', 'help', 'deployment'])
def test_cli_warn_unknown_args():
subprocess.check_call(['jina', 'help', 'deployment', '--abcdefg'])
@pytest.mark.parametrize('cli', ac_table['commands'])
def test_all_cli(cli):
subprocess.check_call(['jina', cli, '--help'])
@pytest.mark.parametrize('smethod', ['fork', 'spawn'])
def test_all_start_method(smethod):
s = subprocess.check_output(
['jina', '-v'],
env=dict(os.environ, JINA_MP_START_METHOD=smethod),
stderr=subprocess.STDOUT,
)
assert 'UserWarning' in s.decode()
assert smethod in s.decode()
def test_parse_env_map():
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', '--env', 'key2=value2']
)
assert a.env == {'key1': 'value1', 'key2': 'value2'}
a = set_deployment_parser().parse_args(
['--env', 'key1=value1', 'key2=value2', 'key3=3']
)
assert a.env == {'key1': 'value1', 'key2': 'value2', 'key3': 3}
@pytest.mark.slow
def test_ping():
a1 = set_pod_parser().parse_args([])
a2 = set_ping_parser().parse_args(['0.0.0.0', str(a1.port)])
a3 = set_ping_parser().parse_args(['0.0.0.1', str(a1.port), '--timeout', '1000'])
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a2)
assert cm.value.code == 0
# test with bad address
with pytest.raises(SystemExit) as cm:
with PodFactory.build_pod(a1):
NetworkChecker(a3)
assert cm.value.code == 1
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Query Sparsity: Active Dimensions: 74.9, Sparsity Ratio: 0.9975
Model Corpus Sparsity: Active Dimensions: 174.8, Sparsity Ratio: 0.9943
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
import logging
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseNanoBEIREvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
"""
Average Queries: 49.92307692307692
Average Corpus: 4334.7692307692305
Aggregated for Score Function: dot
Accuracy@1: 58.72%
Accuracy@3: 75.37%
Accuracy@5: 80.76%
Accuracy@10: 87.07%
Precision@1: 58.72%
Recall@1: 35.61%
Precision@3: 36.31%
Recall@3: 50.84%
Precision@5: 27.72%
Recall@5: 56.55%
Precision@10: 19.18%
Recall@10: 64.21%
MRR@10: 0.6822
NDCG@10: 0.6204
Model Sparsity Stats Query : Row Non-Zero Mean: 74.93406589214618, Row Sparsity Mean: 0.9975449305314285
Model Sparsity Stats Corpus : Row Non-Zero Mean: 174.8070262028621, Row Sparsity Mean: 0.9942727547425491
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: NanoBEIR_mean_dot_ndcg@10
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6204
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope='session', autouse=True)
def create_model_weights():
path_to_embedding_array = os.path.join(TEST_DIR, 'unit', 'expected.npz')
path_to_embedding_batch = os.path.join(TEST_DIR, 'unit', 'expected_batch.npz')
if not os.path.isfile(path_to_embedding_array) or not os.path.isfile(path_to_embedding_batch):
os.system(f'python {os.path.join(TEST_DIR, "unit", "encoding_with_original_tfidf.py")}')
yield
if os.path.isfile(path_to_embedding_array):
os.remove(path_to_embedding_array)
if os.path.isfile(path_to_embedding_batch):
os.remove(path_to_embedding_batch)
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
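# Note (illustrative): `_delete_=True` makes this test_cfg replace, rather than
# merge into, the test_cfg inherited from the base config, following standard
# mmengine config semantics.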
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"KnowledgeTriple": "langchain_community.graphs.networkx_graph",
"parse_triples": "langchain_community.graphs.networkx_graph",
"get_entities": "langchain_community.graphs.networkx_graph",
"NetworkxEntityGraph": "langchain_community.graphs",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"KnowledgeTriple",
"NetworkxEntityGraph",
"get_entities",
"parse_triples",
]
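# Usage sketch: accessing one of the names listed above, e.g.
#
#   from <this package> import KnowledgeTriple
#
# routes through __getattr__ -> _import_attribute, which emits a deprecation
# warning and returns the object from langchain_community.graphs.networkx_graph.
# (The importing package path is whatever __package__ resolves to here.)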
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"KnowledgeTriple": "langchain_community.graphs.networkx_graph",
"parse_triples": "langchain_community.graphs.networkx_graph",
"get_entities": "langchain_community.graphs.networkx_graph",
"NetworkxEntityGraph": "langchain_community.graphs",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"KnowledgeTriple",
"parse_triples",
"get_entities",
"NetworkxEntityGraph",
]
|
"""CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_data(label_mode="fine"):
"""Loads the CIFAR100 dataset.
This is a dataset of 50,000 32x32 color training images and
10,000 test images, labeled over 100 fine-grained classes that are
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
Args:
label_mode: one of `"fine"`, `"coarse"`.
If it is `"fine"`, the category labels
are the fine-grained labels, and if it is `"coarse"`,
the output labels are the coarse-grained superclasses.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`**: `uint8` NumPy array of RGB image data with shape
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(50000, 1)` for the training data.
**`x_test`**: `uint8` NumPy array of RGB image data with shape
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python-target"
origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
extract=True,
file_hash=( # noqa: E501
"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
),
)
path = os.path.join(path, "cifar-100-python")
fpath = os.path.join(path, "train")
x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels")
fpath = os.path.join(path, "test")
x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels")
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
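# Illustrative (left commented out, since calling load_data triggers a large
# ~160 MB download): coarse mode maps the 100 classes onto 20 superclasses.
#
#   (x_train, y_train), _ = load_data(label_mode="coarse")
#   assert int(y_train.max()) == 19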
|
"""CIFAR100 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar100.load_data")
def load_data(label_mode="fine"):
"""Loads the CIFAR100 dataset.
This is a dataset of 50,000 32x32 color training images and
10,000 test images, labeled over 100 fine-grained classes that are
grouped into 20 coarse-grained classes. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
Args:
label_mode: one of `"fine"`, `"coarse"`.
If it is `"fine"`, the category labels
are the fine-grained labels, and if it is `"coarse"`,
the output labels are the coarse-grained superclasses.
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
**`x_train`**: `uint8` NumPy array of RGB image data with shape
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(50000, 1)` for the training data.
**`x_test`**: `uint8` NumPy array of RGB image data with shape
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-99)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar100.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
if label_mode not in ["fine", "coarse"]:
raise ValueError(
'`label_mode` must be one of `"fine"`, `"coarse"`. '
f"Received: label_mode={label_mode}."
)
dirname = "cifar-100-python"
origin = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
untar=True,
file_hash=( # noqa: E501
"85cd44d02ba6437773c5bbd22e183051d648de2e7d6b014e1ef29b855ba677a7"
),
)
fpath = os.path.join(path, "train")
x_train, y_train = load_batch(fpath, label_key=label_mode + "_labels")
fpath = os.path.join(path, "test")
x_test, y_test = load_batch(fpath, label_key=label_mode + "_labels")
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
return (x_train, y_train), (x_test, y_test)
|
"""Test volc engine maas chat model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test valid chat call to volc engine."""
chat = VolcEngineMaasChat()
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = VolcEngineMaasChat()
response = chat.invoke(
[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = VolcEngineMaasChat(streaming=True)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
],
stream=True,
config={"callbacks": callback_manager},
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_stop() -> None:
"""Test that stop works."""
chat = VolcEngineMaasChat(
model="skylark2-pro-4k", model_version="1.2", streaming=True
)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="repeat: hello world"),
AIMessage(content="hello world"),
HumanMessage(content="repeat: hello world"),
],
stream=True,
config={"callbacks": callback_manager},
stop=["world"],
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
assert response.content.rstrip() == "hello"
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = VolcEngineMaasChat()
message = HumanMessage(content="Hi, how are you?")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
"""Test volc engine maas chat model."""
from langchain_core.callbacks import CallbackManager
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_community.chat_models.volcengine_maas import VolcEngineMaasChat
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def test_default_call() -> None:
"""Test valid chat call to volc engine."""
chat = VolcEngineMaasChat() # type: ignore[call-arg]
response = chat.invoke([HumanMessage(content="Hello")])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_multiple_history() -> None:
"""Tests multiple history works."""
chat = VolcEngineMaasChat() # type: ignore[call-arg]
response = chat.invoke(
[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
]
)
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
def test_stream() -> None:
"""Test that stream works."""
chat = VolcEngineMaasChat(streaming=True) # type: ignore[call-arg]
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="Hello"),
AIMessage(content="Hello!"),
HumanMessage(content="How are you?"),
],
stream=True,
config={"callbacks": callback_manager},
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
def test_stop() -> None:
"""Test that stop works."""
chat = VolcEngineMaasChat( # type: ignore[call-arg]
model="skylark2-pro-4k", model_version="1.2", streaming=True
)
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
response = chat.invoke(
[
HumanMessage(content="repeat: hello world"),
AIMessage(content="hello world"),
HumanMessage(content="repeat: hello world"),
],
stream=True,
config={"callbacks": callback_manager},
stop=["world"],
)
assert callback_handler.llm_streams > 0
assert isinstance(response.content, str)
assert response.content.rstrip() == "hello"
def test_multiple_messages() -> None:
"""Tests multiple messages works."""
chat = VolcEngineMaasChat() # type: ignore[call-arg]
message = HumanMessage(content="Hi, how are you?")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
assert len(generations) == 1
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_x101-32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
"""Init file."""
from llama_index.readers.papers.arxiv.base import ArxivReader
from llama_index.readers.papers.pubmed.base import PubmedReader
__all__ = ["ArxivReader", "PubmedReader"]
|
"""Init file."""
from llama_index.readers.papers.arxiv.base import ArxivReader
from llama_index.readers.papers.pubmed.base import PubmedReader
__all__ = ["ArxivReader", "PubmedReader"]
|
# coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import github
import json
from github import Github
import re
from collections import Counter
from pathlib import Path
def pattern_to_regex(pattern):
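    # Convert a CODEOWNERS-style glob into a regex: escape regex metacharacters, let `*`
    # match any run of non-slash characters, and anchor patterns that start with `/`.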
if pattern.startswith("/"):
start_anchor = True
pattern = re.escape(pattern[1:])
else:
start_anchor = False
pattern = re.escape(pattern)
# Replace `*` with "any number of non-slash characters"
pattern = pattern.replace(r"\*", "[^/]*")
if start_anchor:
pattern = r"^\/?" + pattern # Allow an optional leading slash after the start of the string
return pattern
def get_file_owners(file_path, codeowners_lines):
# Process lines in reverse (last matching pattern takes precedence)
for line in reversed(codeowners_lines):
# Skip comments and empty lines, strip inline comments
line = line.split('#')[0].strip()
if not line:
continue
# Split into pattern and owners
parts = line.split()
pattern = parts[0]
        # Can be empty, e.g. for dummy files that explicitly have no owner!
owners = [owner.removeprefix("@") for owner in parts[1:]]
# Check if file matches pattern
file_regex = pattern_to_regex(pattern)
if re.search(file_regex, file_path) is not None:
return owners # Remember, can still be empty!
return [] # Should never happen, but just in case
def pr_author_is_in_hf(pr_author, codeowners_lines):
# Check if the PR author is in the codeowners file
for line in codeowners_lines:
line = line.split('#')[0].strip()
if not line:
continue
# Split into pattern and owners
parts = line.split()
owners = [owner.removeprefix("@") for owner in parts[1:]]
if pr_author in owners:
return True
return False
def main():
script_dir = Path(__file__).parent.absolute()
with open(script_dir / "codeowners_for_review_action") as f:
codeowners_lines = f.readlines()
g = Github(os.environ['GITHUB_TOKEN'])
repo = g.get_repo("huggingface/transformers")
with open(os.environ['GITHUB_EVENT_PATH']) as f:
event = json.load(f)
# The PR number is available in the event payload
pr_number = event['pull_request']['number']
pr = repo.get_pull(pr_number)
pr_author = pr.user.login
if pr_author_is_in_hf(pr_author, codeowners_lines):
print(f"PR author {pr_author} is in codeowners, skipping review request.")
return
existing_reviews = list(pr.get_reviews())
if existing_reviews:
print(f"Already has reviews: {[r.user.login for r in existing_reviews]}")
return
users_requested, teams_requested = pr.get_review_requests()
users_requested = list(users_requested)
if users_requested:
print(f"Reviewers already requested: {users_requested}")
return
locs_per_owner = Counter()
for file in pr.get_files():
owners = get_file_owners(file.filename, codeowners_lines)
for owner in owners:
locs_per_owner[owner] += file.changes
    # Request reviews from the top 2 owners by lines changed, skipping the PR author if present
locs_per_owner.pop(pr_author, None)
top_owners = locs_per_owner.most_common(2)
print("Top owners", top_owners)
top_owners = [owner[0] for owner in top_owners]
try:
pr.create_review_request(top_owners)
except github.GithubException as e:
print(f"Failed to request review for {top_owners}: {e}")
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import github
import json
from github import Github
import re
from collections import Counter
from pathlib import Path
def pattern_to_regex(pattern):
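    # Convert a CODEOWNERS-style glob into a regex: escape regex metacharacters, let `*`
    # match any run of non-slash characters, and anchor patterns that start with `/`.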
if pattern.startswith("/"):
start_anchor = True
pattern = re.escape(pattern[1:])
else:
start_anchor = False
pattern = re.escape(pattern)
# Replace `*` with "any number of non-slash characters"
pattern = pattern.replace(r"\*", "[^/]*")
if start_anchor:
pattern = r"^\/?" + pattern # Allow an optional leading slash after the start of the string
return pattern
def get_file_owners(file_path, codeowners_lines):
# Process lines in reverse (last matching pattern takes precedence)
for line in reversed(codeowners_lines):
# Skip comments and empty lines, strip inline comments
line = line.split('#')[0].strip()
if not line:
continue
# Split into pattern and owners
parts = line.split()
pattern = parts[0]
        # Can be empty, e.g. for dummy files that explicitly have no owner!
owners = [owner.removeprefix("@") for owner in parts[1:]]
# Check if file matches pattern
file_regex = pattern_to_regex(pattern)
if re.search(file_regex, file_path) is not None:
return owners # Remember, can still be empty!
return [] # Should never happen, but just in case
def main():
script_dir = Path(__file__).parent.absolute()
with open(script_dir / "codeowners_for_review_action") as f:
codeowners_lines = f.readlines()
g = Github(os.environ['GITHUB_TOKEN'])
repo = g.get_repo("huggingface/transformers")
with open(os.environ['GITHUB_EVENT_PATH']) as f:
event = json.load(f)
# The PR number is available in the event payload
pr_number = event['pull_request']['number']
pr = repo.get_pull(pr_number)
pr_author = pr.user.login
existing_reviews = list(pr.get_reviews())
if existing_reviews:
print(f"Already has reviews: {[r.user.login for r in existing_reviews]}")
return
users_requested, teams_requested = pr.get_review_requests()
users_requested = list(users_requested)
if users_requested:
print(f"Reviewers already requested: {users_requested}")
return
locs_per_owner = Counter()
for file in pr.get_files():
owners = get_file_owners(file.filename, codeowners_lines)
for owner in owners:
locs_per_owner[owner] += file.changes
    # Request reviews from the top 2 owners by lines changed, skipping the PR author if present
locs_per_owner.pop(pr_author, None)
top_owners = locs_per_owner.most_common(2)
print("Top owners", top_owners)
top_owners = [owner[0] for owner in top_owners]
try:
pr.create_review_request(top_owners)
except github.GithubException as e:
print(f"Failed to request review for {top_owners}: {e}")
if __name__ == "__main__":
main()
|
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
if is_module_available("unidecode") and is_module_available("inflect"):
from pipeline_tacotron2.text.numbers import (
_expand_decimal_point,
_expand_dollars,
_expand_number,
_expand_ordinal,
_expand_pounds,
_remove_commas,
)
from pipeline_tacotron2.text.text_preprocessing import text_to_sequence
@skipIfNoModule("unidecode")
@skipIfNoModule("inflect")
class TestTextPreprocessor(TorchaudioTestCase):
@parameterized.expand(
[
["dr. Strange?", [15, 26, 14, 31, 26, 29, 11, 30, 31, 29, 12, 25, 18, 16, 10]],
["ML, is fun.", [24, 23, 6, 11, 20, 30, 11, 17, 32, 25, 7]],
["I love torchaudio!", [20, 11, 23, 26, 33, 16, 11, 31, 26, 29, 14, 19, 12, 32, 15, 20, 26, 2]],
# 'one thousand dollars, twenty cents'
[
"$1,000.20",
[
26,
25,
16,
11,
31,
19,
26,
32,
30,
12,
25,
15,
11,
15,
26,
23,
23,
12,
29,
30,
6,
11,
31,
34,
16,
25,
31,
36,
11,
14,
16,
25,
31,
30,
],
],
]
)
def test_text_to_sequence(self, sent, seq):
assert text_to_sequence(sent) == seq
@parameterized.expand(
[
["He, she, and I have $1,000", "He, she, and I have $1000"],
]
)
def test_remove_commas(self, sent, truth):
assert _remove_commas(sent) == truth
@parameterized.expand(
[
["He, she, and I have £1000", "He, she, and I have 1000 pounds"],
]
)
def test_expand_pounds(self, sent, truth):
assert _expand_pounds(sent) == truth
@parameterized.expand(
[
["He, she, and I have $1000", "He, she, and I have 1000 dollars"],
["He, she, and I have $3000.01", "He, she, and I have 3000 dollars, 1 cent"],
[
"He has $500.20 and she has $1000.50.",
"He has 500 dollars, 20 cents and she has 1000 dollars, 50 cents.",
],
]
)
def test_expand_dollars(self, sent, truth):
assert _expand_dollars(sent) == truth
@parameterized.expand(
[
["1000.20", "1000 point 20"],
["1000.1", "1000 point 1"],
]
)
def test_expand_decimal_point(self, sent, truth):
assert _expand_decimal_point(sent) == truth
@parameterized.expand(
[
["21st centry", "twenty-first centry"],
["20th centry", "twentieth centry"],
["2nd place.", "second place."],
]
)
def test_expand_ordinal(self, sent, truth):
assert _expand_ordinal(sent) == truth
@parameterized.expand(
[
["100020 dollars.", "one hundred thousand twenty dollars."],
[
"1234567890!",
"one billion, two hundred thirty-four million, "
"five hundred sixty-seven thousand, eight hundred ninety!",
],
]
)
def test_expand_number(self, sent, truth):
assert _expand_number(sent) == truth
|
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
if is_module_available("unidecode") and is_module_available("inflect"):
from pipeline_tacotron2.text.numbers import (
_remove_commas,
_expand_pounds,
_expand_dollars,
_expand_decimal_point,
_expand_ordinal,
_expand_number,
)
from pipeline_tacotron2.text.text_preprocessing import text_to_sequence
@skipIfNoModule("unidecode")
@skipIfNoModule("inflect")
class TestTextPreprocessor(TorchaudioTestCase):
@parameterized.expand(
[
["dr. Strange?", [15, 26, 14, 31, 26, 29, 11, 30, 31, 29, 12, 25, 18, 16, 10]],
["ML, is fun.", [24, 23, 6, 11, 20, 30, 11, 17, 32, 25, 7]],
["I love torchaudio!", [20, 11, 23, 26, 33, 16, 11, 31, 26, 29, 14, 19, 12, 32, 15, 20, 26, 2]],
# 'one thousand dollars, twenty cents'
[
"$1,000.20",
[
26,
25,
16,
11,
31,
19,
26,
32,
30,
12,
25,
15,
11,
15,
26,
23,
23,
12,
29,
30,
6,
11,
31,
34,
16,
25,
31,
36,
11,
14,
16,
25,
31,
30,
],
],
]
)
def test_text_to_sequence(self, sent, seq):
assert text_to_sequence(sent) == seq
@parameterized.expand(
[
["He, she, and I have $1,000", "He, she, and I have $1000"],
]
)
def test_remove_commas(self, sent, truth):
assert _remove_commas(sent) == truth
@parameterized.expand(
[
["He, she, and I have £1000", "He, she, and I have 1000 pounds"],
]
)
def test_expand_pounds(self, sent, truth):
assert _expand_pounds(sent) == truth
@parameterized.expand(
[
["He, she, and I have $1000", "He, she, and I have 1000 dollars"],
["He, she, and I have $3000.01", "He, she, and I have 3000 dollars, 1 cent"],
[
"He has $500.20 and she has $1000.50.",
"He has 500 dollars, 20 cents and she has 1000 dollars, 50 cents.",
],
]
)
def test_expand_dollars(self, sent, truth):
assert _expand_dollars(sent) == truth
@parameterized.expand(
[
["1000.20", "1000 point 20"],
["1000.1", "1000 point 1"],
]
)
def test_expand_decimal_point(self, sent, truth):
assert _expand_decimal_point(sent) == truth
@parameterized.expand(
[
["21st centry", "twenty-first centry"],
["20th centry", "twentieth centry"],
["2nd place.", "second place."],
]
)
def test_expand_ordinal(self, sent, truth):
assert _expand_ordinal(sent) == truth
@parameterized.expand(
[
["100020 dollars.", "one hundred thousand twenty dollars."],
[
"1234567890!",
"one billion, two hundred thirty-four million, "
"five hundred sixty-seven thousand, eight hundred ninety!",
],
]
)
def test_expand_number(self, sent, truth):
assert _expand_number(sent) == truth
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
input: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_: Any
) -> Union[BaseModel, dict]:
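    # Stand-in for a structured-output model: instantiate the pydantic schema directly,
    # or derive a dict from the schema's declared parameters.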
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
else:
params = cast("dict", schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
    assert prompt.messages[0].prompt.template_format == "mustache"  # type: ignore[union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from functools import partial
from inspect import isclass
from typing import Any, Union, cast
from pydantic import BaseModel
from langchain_core.language_models import FakeListChatModel
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.messages import HumanMessage
from langchain_core.prompts.structured import StructuredPrompt
from langchain_core.runnables.base import Runnable, RunnableLambda
from langchain_core.utils.pydantic import is_basemodel_subclass
def _fake_runnable(
input: Any, *, schema: Union[dict, type[BaseModel]], value: Any = 42, **_: Any
) -> Union[BaseModel, dict]:
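    # Stand-in for a structured-output model: instantiate the pydantic schema directly,
    # or derive a dict from the schema's declared parameters.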
if isclass(schema) and is_basemodel_subclass(schema):
return schema(name="yo", value=value)
else:
params = cast(dict, schema)["parameters"]
return {k: 1 if k != "value" else value for k, v in params.items()}
class FakeStructuredChatModel(FakeListChatModel):
"""Fake ChatModel for testing purposes."""
def with_structured_output(
self, schema: Union[dict, type[BaseModel]], **kwargs: Any
) -> Runnable:
return RunnableLambda(partial(_fake_runnable, schema=schema, **kwargs))
@property
def _llm_type(self) -> str:
return "fake-messages-list-chat-model"
FakeStructuredChatModel.model_rebuild()
def test_structured_prompt_pydantic() -> None:
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
OutputSchema,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=42)
def test_structured_prompt_dict() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 42}
def test_structured_prompt_kwargs() -> None:
prompt = StructuredPrompt(
[
("human", "I'm very structured, how about you?"),
],
{
"name": "yo",
"description": "a structured output",
"parameters": {
"name": {"type": "string"},
"value": {"type": "integer"},
},
},
value=7,
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
assert loads(dumps(prompt)).model_dump() == prompt.model_dump()
chain = loads(dumps(prompt)) | model
assert chain.invoke({"hello": "there"}) == {"name": 1, "value": 7}
class OutputSchema(BaseModel):
name: str
value: int
prompt = StructuredPrompt(
[("human", "I'm very structured, how about you?")], OutputSchema, value=7
)
model = FakeStructuredChatModel(responses=[])
chain = prompt | model
assert chain.invoke({"hello": "there"}) == OutputSchema(name="yo", value=7)
def test_structured_prompt_template_format() -> None:
prompt = StructuredPrompt(
[("human", "hi {{person.name}}")], schema={}, template_format="mustache"
)
    assert prompt.messages[0].prompt.template_format == "mustache"  # type: ignore[union-attr]
assert prompt.input_variables == ["person"]
assert prompt.invoke({"person": {"name": "foo"}}).to_messages() == [
HumanMessage("hi foo")
]
|
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocumentArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: Image
def test_from_to_json():
da = DocumentArray[MyDoc](
[
MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=Image(url='aux.png')),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=Image()),
]
)
json_da = da.to_json()
da2 = DocumentArray[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
|
_base_ = './cascade-mask-rcnn_r50-caffe_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]]  # ideally this would be a sequence
ArrayLike = Any
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[str, bytearray, os.PathLike]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# The second arg is actually Optional[List[cudf.Series]]; it is skipped for easier type
# checking. The cudf Series holds the obtained categorical codes, kept alive in the
# `DataIter` to prevent them from being freed.
TransformedData = Tuple[
Any, Optional[List], Optional[FeatureNames], Optional[FeatureTypes]
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
# pylint: disable=protected-access
"""Shared typing definition."""
import ctypes
import os
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Sequence,
Type,
TypeVar,
Union,
)
# os.PathLike/string/numpy.array/scipy.sparse/pd.DataFrame/dt.Frame/
# cudf.DataFrame/cupy.array/dlpack
import numpy as np
DataType = Any
# XGBoost accepts some other types in practice for historical reasons, but they are less
# well tested. For now, we encourage users to pass a simple list of strings.
FeatureInfo = Sequence[str]
FeatureNames = FeatureInfo
FeatureTypes = FeatureInfo
BoosterParam = Union[List, Dict[str, Any]]  # ideally this would be a sequence
ArrayLike = Any
PathLike = Union[str, os.PathLike]
CupyT = ArrayLike # maybe need a stub for cupy arrays
NumpyOrCupy = Any
NumpyDType = Union[str, Type[np.number]] # pylint: disable=invalid-name
PandasDType = Any # real type is pandas.core.dtypes.base.ExtensionDtype
FloatCompatible = Union[float, np.float32, np.float64]
# callables
FPreProcCallable = Callable
# ctypes
# c_bst_ulong corresponds to bst_ulong defined in xgboost/c_api.h
c_bst_ulong = ctypes.c_uint64 # pylint: disable=C0103
ModelIn = Union[str, bytearray, os.PathLike]
CTypeT = TypeVar(
"CTypeT",
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
ctypes.c_float,
ctypes.c_uint,
ctypes.c_size_t,
)
# supported numeric types
CNumeric = Union[
ctypes.c_float,
ctypes.c_double,
ctypes.c_uint,
ctypes.c_uint64,
ctypes.c_int32,
ctypes.c_int64,
]
# c pointer types
if TYPE_CHECKING:
CStrPtr = ctypes._Pointer[ctypes.c_char]
CStrPptr = ctypes._Pointer[ctypes.c_char_p]
CFloatPtr = ctypes._Pointer[ctypes.c_float]
CNumericPtr = Union[
ctypes._Pointer[ctypes.c_float],
ctypes._Pointer[ctypes.c_double],
ctypes._Pointer[ctypes.c_uint],
ctypes._Pointer[ctypes.c_uint64],
ctypes._Pointer[ctypes.c_int32],
ctypes._Pointer[ctypes.c_int64],
]
else:
CStrPtr = ctypes._Pointer
CStrPptr = ctypes._Pointer
CFloatPtr = ctypes._Pointer
CNumericPtr = Union[
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
ctypes._Pointer,
]
# template parameter
_T = TypeVar("_T")
_F = TypeVar("_F", bound=Callable[..., Any])
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from operator import itemgetter
import pytest
from jina import Executor, Document, DocumentArray
import cv2
from ...yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config('config.yml')
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path', [
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m'
]
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections', [
(os.path.join(cur_dir, '../data/models/yolov5s.pt'), {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(os.path.join(cur_dir, '../data/models/yolov5m.pt'), {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
]
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections', [
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
]
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray([
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
])
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'))
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from operator import itemgetter
import pytest
from jina import Executor, Document, DocumentArray
import cv2
from yolov5_segmenter import YoloV5Segmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
segmenter = Executor.load_config('config.yml')
assert segmenter.model_name_or_path == 'yolov5s'
@pytest.mark.parametrize(
'model_path', [
os.path.join(cur_dir, '../data/models/yolov5s.pt'),
os.path.join(cur_dir, '../data/models/yolov5m.pt'),
'yolov5s',
'yolov5m'
]
)
def test_model_name_or_path(build_da, model_path):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) > 0
for chunk in doc.chunks:
assert chunk.blob.ndim == 3
assert chunk.tags.get('label')
assert chunk.tags.get('conf')
@pytest.mark.parametrize(
'model_path, expected_detections', [
(os.path.join(cur_dir, '../data/models/yolov5s.pt'), {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(os.path.join(cur_dir, '../data/models/yolov5m.pt'), {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
]
)
def test_n_detections(build_da, model_path, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=model_path)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
@pytest.mark.parametrize(
'confidence_threshold, expected_detections', [
(0.3, {'bus.jpg': 6, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.5, {'bus.jpg': 5, 'zidane.jpg': 3, 'man.jpg': 3}),
(0.8, {'bus.jpg': 3, 'zidane.jpg': 2, 'man.jpg': 0}),
]
)
def test_confidence_threshold(build_da, confidence_threshold, expected_detections):
da = build_da()
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'),
default_confidence_threshold=confidence_threshold)
segmenter.segment(da, parameters={})
for doc in da:
assert len(doc.chunks) == expected_detections[doc.tags['filename']]
assert all(chunk.tags['conf'] >= confidence_threshold for chunk in doc.chunks)
def test_traversal_paths():
da = DocumentArray([
Document(
id='root',
blob=cv2.imread(os.path.join(cur_dir, '../data/img/man.jpg')),
),
])
segmenter = YoloV5Segmenter(model_name_or_path=os.path.join(cur_dir, '../data/models/yolov5m.pt'))
segmenter.segment(da, parameters={})
# detects 2 persons and 1 cell phone
assert len(da[0].chunks) == 3
assert da[0].chunks[0].tags['label'] == 'person'
assert da[0].chunks[1].tags['label'] == 'person'
assert da[0].chunks[2].tags['label'] == 'cell phone'
segmenter.segment(da, parameters={'traversal_paths': ['c']})
# the first detected person spans the whole image, so segmenting the chunk produces 3 detections
person_chunk = da[0].chunks[0]
assert len(person_chunk.chunks) == 3
assert person_chunk.chunks[0].tags['label'] == 'person'
assert person_chunk.chunks[1].tags['label'] == 'person'
assert person_chunk.chunks[2].tags['label'] == 'cell phone'
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train", trust_remote_code=True)
corpus = dataset["answer"]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], max_active_dims=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
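    # Evaluate the untrained model once to establish a baseline before training.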
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder, SparseEncoderTrainer, SparseEncoderTrainingArguments
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import evaluation, losses, models
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = models.CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=8, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = losses.CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(evaluation.SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], max_active_dims=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
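    # Evaluate the untrained model once to establish a baseline before training.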
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from torch.utils.data import DataLoader
from tqdm import tqdm
from datasets import load_dataset
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you must guard your code with `if __name__ == "__main__":`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
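    # Each DataLoader batch pulls data_stream_size rows from the stream, so only that
    # many rows sit in memory at once.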
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data, which is helpful if you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you must guard your code with `if __name__ == "__main__":`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
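    # Each DataLoader batch pulls data_stream_size rows from the stream, so only that
    # many rows sit in memory at once.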
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""Init params."""
from llama_index.finetuning.rerankers.cohere_reranker import (
CohereRerankerFinetuneEngine,
)
from llama_index.finetuning.rerankers.dataset_gen import CohereRerankerFinetuneDataset
__all__ = ["CohereRerankerFinetuneEngine", "CohereRerankerFinetuneDataset"]
|
"""Init params."""
from llama_index.finetuning.rerankers.cohere_reranker import (
CohereRerankerFinetuneEngine,
)
from llama_index.finetuning.rerankers.dataset_gen import CohereRerankerFinetuneDataset
__all__ = ["CohereRerankerFinetuneEngine", "CohereRerankerFinetuneDataset"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[],
serialize_data=False,
lazy_init=False)
self.assertEqual(dataset.metainfo['CLASSES'], ('bus', 'car'))
self.assertEqual(dataset.metainfo['task_name'], 'new_task')
self.assertListEqual(dataset.get_cat_ids(0), [0, 1])
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
self.assertEqual(len(dataset), 2)
# test with test_mode = True
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
test_mode=True,
pipeline=[])
self.assertEqual(len(dataset), 4)
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(CLASSES=('car', ), task_name='new_task')
with self.assertRaisesRegex(AssertionError, 'are not unique!'):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CocoDataset
class TestCocoDataset(unittest.TestCase):
def test_coco_dataset(self):
# test CocoDataset
metainfo = dict(CLASSES=('bus', 'car'), task_name='new_task')
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=[])
self.assertEqual(dataset.metainfo['CLASSES'], ('bus', 'car'))
self.assertEqual(dataset.metainfo['task_name'], 'new_task')
def test_coco_dataset_without_filter_cfg(self):
# test CocoDataset without filter_cfg
dataset = CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_sample.json',
pipeline=[])
self.assertEqual(len(dataset), 2)
def test_coco_annotation_ids_unique(self):
# test annotation ids not unique error
metainfo = dict(CLASSES=('car', ), task_name='new_task')
with self.assertRaisesRegex(AssertionError, 'are not unique!'):
CocoDataset(
data_prefix=dict(img='imgs'),
ann_file='tests/data/coco_wrong_format_sample.json',
metainfo=metainfo,
pipeline=[])
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2.utils import is_simple_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
    # This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
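        # A bare dims value applies to every transformed type; wrap it in a pickle-able defaultdict.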
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2.utils import is_simple_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
    # This weird-looking construct only exists because `lambda`s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
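        # A bare dims value applies to every transformed type; wrap it in a pickle-able defaultdict.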
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(
self, inpt: Union[datapoints._TensorImageType, datapoints._TensorVideoType], params: Dict[str, Any]
) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
from __future__ import annotations
from .IDF import IDF
from .MLMTransformer import MLMTransformer
from .SparseAutoEncoder import SparseAutoEncoder
from .SpladePooling import SpladePooling
__all__ = ["SparseAutoEncoder", "MLMTransformer", "SpladePooling", "IDF"]
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .IDF import IDF
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
__all__ = ["CSRSparsity", "MLMTransformer", "SpladePooling", "IDF"]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, str, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
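# A minimal, standard-library-only sketch of the `exposed_run_and_wait`
# pattern above (the real AppService/expose machinery is not reproduced here):
# an async function is wrapped so that callers can invoke it synchronously,
# mirroring `self.run_and_wait`.
if __name__ == "__main__":
    import asyncio
    async def _add(a: int, b: int) -> int:
        return a + b
    def _run_and_wait(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            # Block until the coroutine completes.
            return asyncio.run(f(*args, **kwargs))
        return wrapper
    add_sync = _run_and_wait(_add)
    assert add_sync(2, 3) == 5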
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
_base_ = './mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
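# `_base_` inherits every setting from the R-50 config; only the backbone
# depth and the pretrained checkpoint are overridden here.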
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_zoedepth import *
from .image_processing_zoedepth import *
from .image_processing_zoedepth_fast import *
from .modeling_zoedepth import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
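    # `_LazyModule` defers the submodule imports listed above until an
    # attribute is first accessed, keeping top-level `import transformers` cheap.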
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_zoedepth import *
from .image_processing_zoedepth import *
from .modeling_zoedepth import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
"""Test VLite functionality."""
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import VLite
def test_vlite() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(texts=texts, embedding=FakeEmbeddings()) # type: ignore[call-arg]
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_vlite_with_metadatas() -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas,
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_vlite_with_metadatas_with_scores() -> None:
"""Test end to end construction and search with metadata and scores."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas,
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_vlite_update_document() -> None:
"""Test updating a document."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
ids=["1", "2", "3"],
)
docsearch.update_document("1", Document(page_content="updated_foo"))
output = docsearch.similarity_search("updated_foo", k=1)
assert output == [Document(page_content="updated_foo")]
def test_vlite_delete_document() -> None:
"""Test deleting a document."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
ids=["1", "2", "3"],
)
docsearch.delete(["1"])
output = docsearch.similarity_search("foo", k=3)
assert Document(page_content="foo") not in output
def test_vlite_get_documents() -> None:
"""Test getting documents by IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas,
ids=["1", "2", "3"],
)
output = docsearch.get(ids=["1", "3"])
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
Document(page_content="baz", metadata={"page": "2"}),
]
def test_vlite_from_existing_index() -> None:
"""Test loading from an existing index."""
texts = ["foo", "bar", "baz"]
VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
collection="test_collection",
)
new_docsearch = VLite.from_existing_index(
collection="test_collection",
embedding=FakeEmbeddings(), # type: ignore[call-arg]
)
output = new_docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
|
"""Test VLite functionality."""
from langchain_core.documents import Document
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import VLite
def test_vlite() -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(texts=texts, embedding=FakeEmbeddings()) # type: ignore[call-arg]
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_vlite_with_metadatas() -> None:
"""Test end to end construction and search with metadata."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas, # type: ignore[call-arg]
)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={"page": "0"})]
def test_vlite_with_metadatas_with_scores() -> None:
"""Test end to end construction and search with metadata and scores."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas, # type: ignore[call-arg]
)
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
def test_vlite_update_document() -> None:
"""Test updating a document."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
ids=["1", "2", "3"], # type: ignore[call-arg]
)
docsearch.update_document("1", Document(page_content="updated_foo"))
output = docsearch.similarity_search("updated_foo", k=1)
assert output == [Document(page_content="updated_foo")]
def test_vlite_delete_document() -> None:
"""Test deleting a document."""
texts = ["foo", "bar", "baz"]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
ids=["1", "2", "3"], # type: ignore[call-arg]
)
docsearch.delete(["1"])
output = docsearch.similarity_search("foo", k=3)
assert Document(page_content="foo") not in output
def test_vlite_get_documents() -> None:
"""Test getting documents by IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
metadatas=metadatas,
ids=["1", "2", "3"],
)
output = docsearch.get(ids=["1", "3"])
assert output == [
Document(page_content="foo", metadata={"page": "0"}),
Document(page_content="baz", metadata={"page": "2"}),
]
def test_vlite_from_existing_index() -> None:
"""Test loading from an existing index."""
texts = ["foo", "bar", "baz"]
VLite.from_texts(
texts=texts,
embedding=FakeEmbeddings(), # type: ignore[call-arg]
collection="test_collection", # type: ignore[call-arg]
)
new_docsearch = VLite.from_existing_index(
collection="test_collection",
embedding=FakeEmbeddings(), # type: ignore[call-arg]
)
output = new_docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
|
__version__ = "2.6.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
]
|
__version__ = "2.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"InputExample",
"CrossEncoder",
]
|
from .postgres_indexer import PostgreSQLStorage
|
from .postgres_indexer import PostgreSQLStorage
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
try:
import torch_npu # noqa: F401
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
IS_NPU_AVAILABLE = hasattr(torch, 'npu') and torch.npu.is_available()
except Exception:
IS_NPU_AVAILABLE = False
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
return IS_NPU_AVAILABLE
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
DEVICE = 'cpu'
if is_npu_available():
DEVICE = 'npu'
elif is_cuda_available():
DEVICE = 'cuda'
elif is_mlu_available():
DEVICE = 'mlu'
elif is_mps_available():
DEVICE = 'mps'
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
return DEVICE
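# A minimal usage sketch of the helpers above (runs on any machine; falls
# back to 'cpu' when no accelerator is present):
if __name__ == '__main__':
    print(f'device: {get_device()}')
    if is_cuda_available():
        # Peak tensor memory since program start; also resets the peak stats.
        print(f'max CUDA memory: {get_max_cuda_memory()} MB')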
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
try:
import torch_npu # noqa: F401
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
npu_jit_compile = bool(os.getenv('NPUJITCompile', False))
torch.npu.set_compile_mode(jit_compile=npu_jit_compile)
except Exception:
return False
return hasattr(torch, 'npu') and torch.npu.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
    It's specialized for Mac M1 chips and requires torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
if is_npu_available():
return 'npu'
elif is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
elif is_mps_available():
return 'mps'
else:
return 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import FCOSHead
class TestFCOSHead(TestCase):
def test_fcos_head_loss(self):
"""Tests fcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
fcos_head = FCOSHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
norm_cfg=None)
        # The FCOS head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in fcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = fcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
# Test the `center_sampling` works fine.
fcos_head.center_sampling = True
ctrsamp_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(ctrsamp_ctr_loss, 0,
'centerness loss should be non-zero')
# Test the `norm_on_bbox` works fine.
fcos_head.norm_on_bbox = True
normbox_losses = fcos_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
normbox_cls_loss = normbox_losses['loss_cls'].item()
normbox_box_loss = normbox_losses['loss_bbox'].item()
normbox_ctr_loss = normbox_losses['loss_centerness'].item()
self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(normbox_ctr_loss, 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet.models.dense_heads import FCOSHead
class TestFCOSHead(TestCase):
def test_fcos_head_loss(self):
"""Tests fcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
fcos_head = FCOSHead(
num_classes=4,
in_channels=1,
feat_channels=1,
stacked_convs=1,
norm_cfg=None)
        # The FCOS head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in fcos_head.prior_generator.strides)
cls_scores, bbox_preds, centernesses = fcos_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# box loss and centerness loss should be zero
empty_cls_loss = empty_gt_losses['loss_cls'].item()
empty_box_loss = empty_gt_losses['loss_bbox'].item()
empty_ctr_loss = empty_gt_losses['loss_centerness'].item()
self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero')
self.assertEqual(
empty_box_loss, 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_ctr_loss, 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then all cls, box loss and centerness loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].item()
onegt_box_loss = one_gt_losses['loss_bbox'].item()
onegt_ctr_loss = one_gt_losses['loss_centerness'].item()
self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(onegt_ctr_loss, 0,
'centerness loss should be non-zero')
# Test the `center_sampling` works fine.
fcos_head.center_sampling = True
ctrsamp_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item()
ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item()
ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item()
self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(ctrsamp_ctr_loss, 0,
'centerness loss should be non-zero')
# Test the `norm_on_bbox` works fine.
fcos_head.norm_on_bbox = True
normbox_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
normbox_cls_loss = normbox_losses['loss_cls'].item()
normbox_box_loss = normbox_losses['loss_bbox'].item()
normbox_ctr_loss = normbox_losses['loss_centerness'].item()
self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero')
self.assertGreater(normbox_ctr_loss, 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
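        # With the default anchor generator (3 scales per octave x 3 aspect
        # ratios), num_base_priors is 9: retina_cls emits 9 * num_classes
        # channels and retina_reg emits 9 * 4 channels per spatial location.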
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
                cls_score (Tensor): Cls scores for a single scale level;
                    the number of channels is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level; the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaHead(AnchorHead):
r"""An anchor-based head used in `RetinaNet
<https://arxiv.org/pdf/1708.02002.pdf>`_.
The head contains two subnetworks. The first classifies anchor boxes and
the second regresses deltas for the anchors.
Example:
>>> import torch
>>> self = RetinaHead(11, 7)
>>> x = torch.rand(1, 7, 32, 32)
>>> cls_score, bbox_pred = self.forward_single(x)
>>> # Each anchor predicts a score for each class except background
>>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
>>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
>>> assert cls_per_anchor == (self.num_classes)
>>> assert box_per_anchor == 4
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
init_cfg=dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)),
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(RetinaHead, self).__init__(
num_classes,
in_channels,
anchor_generator=anchor_generator,
init_cfg=init_cfg,
**kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
                cls_score (Tensor): Cls scores for a single scale level;
                    the number of channels is num_anchors * num_classes.
                bbox_pred (Tensor): Box energies / deltas for a single scale
                    level; the number of channels is num_anchors * 4.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray.utils._internal._typing import safe_issubclass
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
```python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert safe_issubclass(MyAudio, BaseDoc)
assert safe_issubclass(MyAudio, Audio)
```
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
        in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`
:return: the new Document class
"""
if not safe_issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
---
```python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert safe_issubclass(Doc, BaseDoc)
assert safe_issubclass(Doc, Audio)
```
---
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
"""
if '__base__' in kwargs:
if not safe_issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
---
```python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert safe_issubclass(MyDoc, BaseDoc)
```
---
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
        field: (type(value) if value is not None else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
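# A minimal end-to-end sketch of the helpers above, assuming nothing beyond
# `docarray` itself:
if __name__ == '__main__':
    Person = create_doc('Person', name=(str, ...), age=(int, 0))
    p = Person(name='Alice')
    assert p.age == 0
    Inferred = create_doc_from_dict('Inferred', {'name': 'Bob', 'score': 1.0})
    assert safe_issubclass(Inferred, BaseDoc)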
|
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type, TypeVar
from pydantic import create_model, create_model_from_typeddict
from pydantic.config import BaseConfig
from typing_extensions import TypedDict
from docarray import BaseDoc
if TYPE_CHECKING:
from pydantic.typing import AnyClassMethod
T_doc = TypeVar('T_doc', bound=BaseDoc)
def create_doc(
__model_name: str,
*,
__config__: Optional[Type[BaseConfig]] = None,
__base__: Type['T_doc'] = BaseDoc, # type: ignore
__module__: str = __name__,
__validators__: Dict[str, 'AnyClassMethod'] = None, # type: ignore
__cls_kwargs__: Dict[str, Any] = None, # type: ignore
__slots__: Optional[Tuple[str, ...]] = None,
**field_definitions: Any,
) -> Type['T_doc']:
"""
Dynamically create a subclass of BaseDoc. This is a wrapper around pydantic's create_model.
```python
from docarray.documents import Audio
from docarray.documents.helper import create_doc
from docarray.typing.tensor.audio import AudioNdArray
MyAudio = create_doc(
'MyAudio',
__base__=Audio,
title=(str, ...),
tensor=(AudioNdArray, ...),
)
assert issubclass(MyAudio, BaseDoc)
assert issubclass(MyAudio, Audio)
```
:param __model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from, must be BaseDoc or its subclass
:param __module__: module of the created model
:param __validators__: a dict of method names and @validator class methods
:param __cls_kwargs__: a dict for class creation
:param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
:param field_definitions: fields of the model (or extra fields if a base is supplied)
        in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`
:return: the new Document class
"""
if not issubclass(__base__, BaseDoc):
raise ValueError(f'{type(__base__)} is not a BaseDoc or its subclass')
doc = create_model(
__model_name,
__config__=__config__,
__base__=__base__,
__module__=__module__,
__validators__=__validators__,
__cls_kwargs__=__cls_kwargs__,
__slots__=__slots__,
**field_definitions,
)
return doc
def create_doc_from_typeddict(
typeddict_cls: Type['TypedDict'], # type: ignore
**kwargs: Any,
):
"""
Create a subclass of BaseDoc based on the fields of a `TypedDict`. This is a wrapper around pydantic's create_model_from_typeddict.
---
```python
from typing_extensions import TypedDict
from docarray import BaseDoc
from docarray.documents import Audio
from docarray.documents.helper import create_doc_from_typeddict
from docarray.typing.tensor.audio import AudioNdArray
class MyAudio(TypedDict):
title: str
tensor: AudioNdArray
Doc = create_doc_from_typeddict(MyAudio, __base__=Audio)
assert issubclass(Doc, BaseDoc)
assert issubclass(Doc, Audio)
```
---
:param typeddict_cls: TypedDict class to use for the new Document class
:param kwargs: extra arguments to pass to `create_model_from_typeddict`
:return: the new Document class
"""
if '__base__' in kwargs:
if not issubclass(kwargs['__base__'], BaseDoc):
raise ValueError(f'{kwargs["__base__"]} is not a BaseDoc or its subclass')
else:
kwargs['__base__'] = BaseDoc
doc = create_model_from_typeddict(typeddict_cls, **kwargs)
return doc
def create_doc_from_dict(model_name: str, data_dict: Dict[str, Any]) -> Type['T_doc']:
"""
Create a subclass of BaseDoc based on example data given as a dictionary.
In case the example contains None as a value,
corresponding field will be viewed as the type Any.
---
```python
import numpy as np
from docarray.documents import ImageDoc
from docarray.documents.helper import create_doc_from_dict
data_dict = {'image': ImageDoc(tensor=np.random.rand(3, 224, 224)), 'author': 'me'}
MyDoc = create_doc_from_dict(model_name='MyDoc', data_dict=data_dict)
assert issubclass(MyDoc, BaseDoc)
```
---
:param model_name: Name of the new Document class
:param data_dict: Dictionary of field types to their corresponding values.
:return: the new Document class
"""
if not data_dict:
raise ValueError('`data_dict` should contain at least one item')
field_types = {
        field: (type(value) if value is not None else Any, ...)
for field, value in data_dict.items()
}
return create_doc(__model_name=model_name, **field_types) # type: ignore
|
from langchain_core._api import warn_deprecated
from pydantic.v1.main import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
import numpy as np
import pytest
from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
)
def test_mutual_reachability_graph_error_sparse_format():
"""Check that we raise an error if the sparse format is not CSR."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, "sparse_csc")
err_msg = "Only sparse CSR matrices are supported"
with pytest.raises(ValueError, match=err_msg):
mutual_reachability_graph(X)
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
def test_mutual_reachability_graph_inplace(array_type):
"""Check that the operation is happening inplace."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
mr_graph = mutual_reachability_graph(X)
assert id(mr_graph) == id(X)
def test_mutual_reachability_graph_equivalence_dense_sparse():
"""Check that we get the same results for dense and sparse implementation."""
rng = np.random.RandomState(0)
X = rng.randn(5, 5)
X_dense = X.T @ X
X_sparse = _convert_container(X_dense, "sparse_csr")
mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)
assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_mutual_reachability_graph_preserves_dtype(array_type, dtype):
"""Check that the computation preserve dtype thanks to fused types."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = (X.T @ X).astype(dtype)
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
assert X.dtype == dtype
mr_graph = mutual_reachability_graph(X)
assert mr_graph.dtype == dtype
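# For reference, the quantity exercised above is the mutual reachability
# distance max(core_k(a), core_k(b), d(a, b)), where core_k(x) is the distance
# from x to its k-th nearest neighbour (k = min_samples). A dense NumPy sketch
# of that definition; the exact indexing convention is an assumption and may
# differ from scikit-learn's internals:
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(6, 6)
    X = X.T @ X
    np.fill_diagonal(X, 0.0)
    min_samples = 3
    # k-th smallest entry per row, counting the zero self-distance.
    core = np.partition(X, min_samples - 1, axis=1)[:, min_samples - 1]
    mr = np.maximum(np.maximum(core[:, None], core[None, :]), X)
    print(mr.shape)  # (6, 6)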
|
import numpy as np
import pytest
from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
)
def test_mutual_reachability_graph_error_sparse_format():
"""Check that we raise an error if the sparse format is not CSR."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, "sparse_csc")
err_msg = "Only sparse CSR matrices are supported"
with pytest.raises(ValueError, match=err_msg):
mutual_reachability_graph(X)
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
def test_mutual_reachability_graph_inplace(array_type):
"""Check that the operation is happening inplace."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = X.T @ X
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
mr_graph = mutual_reachability_graph(X)
assert id(mr_graph) == id(X)
def test_mutual_reachability_graph_equivalence_dense_sparse():
"""Check that we get the same results for dense and sparse implementation."""
rng = np.random.RandomState(0)
X = rng.randn(5, 5)
X_dense = X.T @ X
X_sparse = _convert_container(X_dense, "sparse_csr")
mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)
assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_mutual_reachability_graph_preserve_dtype(array_type, dtype):
"""Check that the computation preserve dtype thanks to fused types."""
rng = np.random.RandomState(0)
X = rng.randn(10, 10)
X = (X.T @ X).astype(dtype)
np.fill_diagonal(X, 0.0)
X = _convert_container(X, array_type)
assert X.dtype == dtype
mr_graph = mutual_reachability_graph(X)
assert mr_graph.dtype == dtype
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
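# Typical invocation (paths are illustrative, not taken from this repo):
#   python browse_dataset.py configs/retinanet/retinanet_r50_fpn_1x_coco.py \
#       --output-dir work_dirs/browse --not-show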
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) on the STSbenchmark dataset from scratch.
It uses MatryoshkaLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import (
SentenceTransformer,
SentenceTransformerTrainer,
SentenceTransformerTrainingArguments,
losses,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/2d_matryoshka_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, inner_train_loss, [768, 512, 256, 128, 64])
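# Matryoshka2dLoss applies the inner CoSENTLoss both across the truncated
# embedding dimensions listed above and across intermediate transformer layers;
# these two axes are the "2d" in the name.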
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="2d-matryoshka-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the STSbenchmark from scratch.
It uses Matryoshka2dLoss with the powerful CoSENTLoss to train models that perform well at output dimensions [768, 512, 256, 128, 64].
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python 2d_matryoshka_sts.py
OR
python 2d_matryoshka_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/2d_matryoshka_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed-size sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.Matryoshka2dLoss(model, train_loss, [768, 512, 256, 128, 64])
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training. We skip evaluation in this example
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-2d-matryoshka")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-2d-matryoshka')`."
)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: shield your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
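# With these settings, each dataloader batch of 16384 sentences is split into
# 16384 / 1024 = 16 chunks for the worker processes, and every worker encodes
# its chunk in sub-batches of 128.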
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode(sentences, batch_size=encode_batch_size, pool=pool, chunk_size=chunk_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
It also demonstrates how to stream data which is helpful in case you don't
want to wait for an extremely large dataset to download, or if you want to
limit the amount of memory used. More info about dataset streaming:
https://huggingface.co/docs/datasets/stream
"""
import logging
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: shield your code with `if __name__ == "__main__"`. Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Set params
data_stream_size = 16384 # Size of the data that is loaded into memory at once
chunk_size = 1024 # Size of the chunks that are sent to each process
encode_batch_size = 128 # Batch size of the model
# Load a large dataset in streaming mode. more info: https://huggingface.co/docs/datasets/stream
dataset = load_dataset("yahoo_answers_topics", split="train", streaming=True)
dataloader = DataLoader(dataset.with_format("torch"), batch_size=data_stream_size)
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
for i, batch in enumerate(tqdm(dataloader)):
# Compute the embeddings using the multi-process pool
sentences = batch["best_answer"]
batch_emb = model.encode_multi_process(sentences, pool, chunk_size=chunk_size, batch_size=encode_batch_size)
print("Embeddings computed for 1 batch. Shape:", batch_emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
# test independent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))
# test dependent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
def test_parametrized_subclass():
c1 = TorchTensor[128]
c2 = TorchTensor[128]
assert issubclass(c1, c2)
assert issubclass(c1, TorchTensor)
assert issubclass(c1, torch.Tensor)
assert not issubclass(c1, TorchTensor[256])
def test_parametrized_instance():
t = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(t, TorchTensor[128])
assert isinstance(t, TorchTensor)
assert isinstance(t, torch.Tensor)
assert not isinstance(t, TorchTensor[256])
def test_parametrized_equality():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert (t1 == t2).all()
def test_parametrized_operations():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, torch.Tensor)
assert isinstance(t_result, TorchTensor)
assert isinstance(t_result, TorchTensor[128])
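# For context, these parametrized tensor types are typically used as field
# annotations on documents. A minimal sketch, assuming the docarray v2
# `BaseDocument` API referenced by the imports above:
#
#   from docarray import BaseDocument
#
#   class ImageDoc(BaseDocument):
#       tensor: TorchTensor[3, 224, 224]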
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
# test independent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 60, 128)
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))
# test dependent variable dimensions
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))
with pytest.raises(ValueError):
tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
def test_parametrized_subclass():
c1 = TorchTensor[128]
c2 = TorchTensor[128]
assert issubclass(c1, c2)
assert issubclass(c1, TorchTensor)
assert issubclass(c1, torch.Tensor)
assert not issubclass(c1, TorchTensor[256])
def test_parametrized_instance():
t = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(t, TorchTensor[128])
assert isinstance(t, TorchTensor)
assert isinstance(t, torch.Tensor)
assert not isinstance(t, TorchTensor[256])
def test_parametrized_equality():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t3 = parse_obj_as(TorchTensor[256], torch.zeros(256))
assert (t1 == t2).all()
assert t1.shape != t3.shape  # element-wise `==` raises for mismatched shapes
def test_parametrized_operations():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, torch.Tensor)
assert isinstance(t_result, TorchTensor)
assert isinstance(t_result, TorchTensor[128])
|
from typing import Optional, Any
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.tablestore import TablestoreKVStore
class TablestoreIndexStore(KVIndexStore):
"""
Tablestore Index store.
Args:
tablestore_kvstore (TablestoreKVStore): Tablestore key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the table name
"""
def __init__(
self,
tablestore_kvstore: TablestoreKVStore,
namespace: str = "llama_index_index_store_",
collection_suffix: str = "data",
) -> None:
"""Init a TablestoreIndexStore."""
super().__init__(
kvstore=tablestore_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
self._tablestore_kvstore = tablestore_kvstore
@classmethod
def from_config(
cls,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
**kwargs: Any,
) -> "TablestoreIndexStore":
"""Load a TablestoreIndexStore from config."""
kv_store = TablestoreKVStore(
endpoint=endpoint,
instance_name=instance_name,
access_key_id=access_key_id,
access_key_secret=access_key_secret,
kwargs=kwargs,
)
return cls(tablestore_kvstore=kv_store)
def delete_all_index(self):
"""Delete all index."""
self._tablestore_kvstore.delete_all(self._collection)
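# Example usage (a sketch; the endpoint and credentials are placeholders):
#
#   index_store = TablestoreIndexStore.from_config(
#       endpoint="https://<instance>.<region>.ots.aliyuncs.com",
#       instance_name="<instance>",
#       access_key_id="<access-key-id>",
#       access_key_secret="<access-key-secret>",
#   )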
|
from typing import Optional, Any
from llama_index.core.storage.index_store.keyval_index_store import KVIndexStore
from llama_index.storage.kvstore.tablestore import TablestoreKVStore
class TablestoreIndexStore(KVIndexStore):
"""Tablestore Index store.
Args:
tablestore_kvstore (TablestoreKVStore): Tablestore key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the table name
"""
def __init__(
self,
tablestore_kvstore: TablestoreKVStore,
namespace: str = "llama_index_index_store_",
collection_suffix: str = "data",
) -> None:
"""Init a TablestoreIndexStore."""
super().__init__(
kvstore=tablestore_kvstore,
namespace=namespace,
collection_suffix=collection_suffix,
)
self._tablestore_kvstore = tablestore_kvstore
@classmethod
def from_config(
cls,
endpoint: Optional[str] = None,
instance_name: Optional[str] = None,
access_key_id: Optional[str] = None,
access_key_secret: Optional[str] = None,
**kwargs: Any,
) -> "TablestoreIndexStore":
"""Load a TablestoreIndexStore from config."""
kv_store = TablestoreKVStore(
endpoint=endpoint,
instance_name=instance_name,
access_key_id=access_key_id,
access_key_secret=access_key_secret,
kwargs=kwargs,
)
return cls(tablestore_kvstore=kv_store)
def delete_all_index(self):
"""Delete all index."""
self._tablestore_kvstore.delete_all(self._collection)
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core._api import beta
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
@beta()
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: Optional[UsageMetadata] = None
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
else:
usage_metadata = None
except AttributeError:
usage_metadata = None
else:
usage_metadata = None
# update shared state behind lock
with self._lock:
self.usage_metadata = add_usage(self.usage_metadata, usage_metadata)
@contextmanager
@beta()
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
"""Callback Handler that tracks AIMessage.usage_metadata."""
import threading
from collections.abc import Generator
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Optional
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata, add_usage
from langchain_core.outputs import ChatGeneration, LLMResult
class UsageMetadataCallbackHandler(BaseCallbackHandler):
"""Callback Handler that tracks AIMessage.usage_metadata.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import UsageMetadataCallbackHandler
llm = init_chat_model(model="openai:gpt-4o-mini")
callback = UsageMetadataCallbackHandler()
results = llm.batch(["Hello", "Goodbye"], config={"callbacks": [callback]})
print(callback.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
def __init__(self) -> None:
super().__init__()
self._lock = threading.Lock()
self.usage_metadata: Optional[UsageMetadata] = None
def __repr__(self) -> str:
return str(self.usage_metadata)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Collect token usage."""
# Check for usage_metadata (langchain-core >= 0.2.2)
try:
generation = response.generations[0][0]
except IndexError:
generation = None
if isinstance(generation, ChatGeneration):
try:
message = generation.message
if isinstance(message, AIMessage):
usage_metadata = message.usage_metadata
else:
usage_metadata = None
except AttributeError:
usage_metadata = None
else:
usage_metadata = None
# update shared state behind lock
with self._lock:
self.usage_metadata = add_usage(self.usage_metadata, usage_metadata)
@contextmanager
def get_usage_metadata_callback(
name: str = "usage_metadata_callback",
) -> Generator[UsageMetadataCallbackHandler, None, None]:
"""Get context manager for tracking usage metadata across chat model calls using
``AIMessage.usage_metadata``.
Args:
name (str): The name of the context variable. Defaults to
``"usage_metadata_callback"``.
Example:
.. code-block:: python
from langchain.chat_models import init_chat_model
from langchain_core.callbacks import get_usage_metadata_callback
llm = init_chat_model(model="openai:gpt-4o-mini")
with get_usage_metadata_callback() as cb:
llm.invoke("Hello")
llm.invoke("Goodbye")
print(cb.usage_metadata)
.. code-block:: none
{'output_token_details': {'audio': 0, 'reasoning': 0}, 'input_tokens': 17, 'output_tokens': 31, 'total_tokens': 48, 'input_token_details': {'cache_read': 0, 'audio': 0}}
.. versionadded:: 0.3.49
""" # noqa: E501
from langchain_core.tracers.context import register_configure_hook
usage_metadata_callback_var: ContextVar[Optional[UsageMetadataCallbackHandler]] = (
ContextVar(name, default=None)
)
register_configure_hook(usage_metadata_callback_var, True)
cb = UsageMetadataCallbackHandler()
usage_metadata_callback_var.set(cb)
yield cb
usage_metadata_callback_var.set(None)
|
_base_ = './tood_r50_fpn_1x_coco.py'
max_epochs = 24
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
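# The two schedulers run back to back: linear warmup over the first 500
# iterations, then step decay (gamma=0.1) at epochs 16 and 22 of the
# 24-epoch ("2x") schedule.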
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# multi-scale training
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 480), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './tood_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
# multi-scale training
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 480), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
SchedulerType,
SpladeLambdaSchedulerCallback,
)
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Callbacks
"SpladeLambdaSchedulerCallback",
"SchedulerType",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import testing
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
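# Calling the model propagates the mask produced by Masking.compute_mask() to
# the `mask` argument of TestLayer.call(), which the assertions above verify.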
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import testing
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
|
import os
import socket
from typing import TYPE_CHECKING, Optional
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING: # pragma: no cover
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset', encoding='utf-8') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args string, e.g. 'all', '2' or 'device=0,driver=nvidia'
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
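# Example (grounded in the parsing above): 'all' yields a DeviceRequest with
# count=-1 (all GPUs); 'device=0,device=1,driver=nvidia' yields
# device_ids=['0', '1'] and driver='nvidia', e.g.:
#
#   device_requests = get_gpu_device_requests('all')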
|
import os
import socket
from typing import TYPE_CHECKING, Optional
def get_docker_network(client) -> Optional[str]:
"""Do a best-effort guess if the caller is already in a docker network
Check if `hostname` exists in list of docker containers.
If a container is found, check its network id
:param client: docker client object
:return: network id if exists
"""
import docker
if TYPE_CHECKING: # pragma: no cover
from docker.models.containers import Container
container: 'Container' = None
try:
hostname = socket.gethostname()
container = client.containers.get(hostname)
except docker.errors.NotFound:
try:
# https://stackoverflow.com/a/52988227/15683245
with open('/proc/1/cpuset') as f:
hostname = os.path.basename(f.read().rstrip())
container = client.containers.get(hostname)
except Exception:
return None
try:
networks = container.attrs['NetworkSettings']['Networks']
if networks:
net_mode = list(networks.keys())[0]
return networks[net_mode]['NetworkID']
else:
return None
except Exception:
return None
def get_gpu_device_requests(gpu_args):
"""Get docker device requests from gpu args
:param gpu_args: gpu args string, e.g. 'all', '2' or 'device=0,driver=nvidia'
:return: docker device requests
"""
import docker
_gpus = {
'count': 0,
'capabilities': ['gpu'],
'device': [],
'driver': '',
}
for gpu_arg in gpu_args.split(','):
if gpu_arg == 'all':
_gpus['count'] = -1
if gpu_arg.isdigit():
_gpus['count'] = int(gpu_arg)
if '=' in gpu_arg:
gpu_arg_key, gpu_arg_value = gpu_arg.split('=')
if gpu_arg_key in _gpus.keys():
if isinstance(_gpus[gpu_arg_key], list):
_gpus[gpu_arg_key].append(gpu_arg_value)
else:
_gpus[gpu_arg_key] = gpu_arg_value
device_requests = [
docker.types.DeviceRequest(
count=_gpus['count'],
driver=_gpus['driver'],
device_ids=_gpus['device'],
capabilities=[_gpus['capabilities']],
)
]
return device_requests
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
# [7] yields higher performance than [6]
milestones=[7],
gamma=0.1)
]
train_cfg = dict(by_epoch=True, max_epochs=8) # actual epoch = 8 * 8 = 64
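# The cityscapes_detection base config repeats the training set 8 times
# (RepeatDataset), so the 8 epochs above amount to 64 passes over the raw data.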
val_cfg = dict(interval=1)
test_cfg = dict()
# For better, more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
# TODO: support auto scaling lr
# auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_detection.py',
'../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
input_size: int, input_iterable: list[int], expected_output: list[list[int]]
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
"input_size, input_iterable, expected_output",
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
input_size: int, input_iterable: list[int], expected_output: list[list[int]]
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS, SourceSeparationBundle
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
"HDEMUCS_HIGH_MUSDB_PLUS",
]
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
from .source_separation_pipeline import CONVTASNET_BASE_LIBRI2MIX, SourceSeparationBundle
__all__ = [
"CONVTASNET_BASE_LIBRI2MIX",
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"SourceSeparationBundle",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
def palette_val(palette):
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette, num_classes):
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
num_classes (int): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
assert isinstance(num_classes, int)
if isinstance(palette, list):
dataset_palette = palette
elif isinstance(palette, tuple):
dataset_palette = [palette] * num_classes
elif palette == 'random' or palette is None:
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
dataset_palette = [tuple(c) for c in palette]
elif palette == 'coco':
from mmdet.datasets import CocoDataset, CocoPanopticDataset
dataset_palette = CocoDataset.PALETTE
if len(dataset_palette) < num_classes:
dataset_palette = CocoPanopticDataset.PALETTE
elif palette == 'citys':
from mmdet.datasets import CityscapesDataset
dataset_palette = CityscapesDataset.PALETTE
elif palette == 'voc':
from mmdet.datasets import VOCDataset
dataset_palette = VOCDataset.PALETTE
elif mmcv.is_str(palette):
dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
assert len(dataset_palette) >= num_classes, \
'The length of palette should not be less than `num_classes`.'
return dataset_palette
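# Example: get_palette('coco', 80) returns the 80 COCO class colors, while
# get_palette((255, 0, 0), 3) returns [(255, 0, 0)] * 3.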
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import mmdet
def palette_val(palette):
"""Convert palette to matplotlib palette.
Args:
palette (List[tuple]): A list of color tuples.
Returns:
List[tuple[float]]: A list of RGB matplotlib color tuples.
"""
new_palette = []
for color in palette:
color = [c / 255 for c in color]
new_palette.append(tuple(color))
return new_palette
def get_palette(palette, num_classes=None):
"""Get palette from various inputs.
Args:
palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
num_classes (int, optional): the number of classes.
Returns:
list[tuple[int]]: A list of color tuples.
"""
if isinstance(palette, list):
return palette
elif isinstance(palette, tuple):
assert isinstance(num_classes, int)
return [palette] * num_classes
elif palette == 'coco':
return mmdet.datasets.CocoDataset.PALETTE
elif palette == 'voc':
return mmdet.datasets.VOCDataset.PALETTE
elif palette == 'citys':
return mmdet.datasets.CityscapesDataset.PALETTE
elif palette == 'random' or palette is None:
assert isinstance(num_classes, int)
state = np.random.get_state()
# random color
np.random.seed(42)
palette = np.random.randint(0, 256, size=(num_classes, 3))
np.random.set_state(state)
return [tuple(c) for c in palette]
elif mmcv.is_str(palette):
assert isinstance(num_classes, int)
return [mmcv.color_val(palette)[::-1]] * num_classes
else:
raise TypeError(f'Invalid type for palette: {type(palette)}')
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._utils import (demo_mm_inputs, demo_mm_proposals,
demo_mm_sampling_results, get_detector_cfg,
get_roi_head_cfg)
__all__ = [
'demo_mm_inputs', 'get_detector_cfg', 'get_roi_head_cfg',
'demo_mm_proposals', 'demo_mm_sampling_results'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._utils import demo_mm_inputs, get_detector_cfg
__all__ = ['demo_mm_inputs', 'get_detector_cfg']
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _get_fill, _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_boxes, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Union[Type, str], datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
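# a single random draw sets both the vertical and horizontal crop offset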
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = _get_fill(self._fill, type(inpt))
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_boxes, query_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBoxes) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBoxes is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_boxes(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, canvas_size = F.crop_bounding_boxes(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_boxes(bounding_boxes, format=format, canvas_size=canvas_size)
height_and_width = F.convert_format_bounding_boxes(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBoxes):
inpt = datapoints.BoundingBoxes.wrap_like(
inpt,
F.clamp_bounding_boxes(inpt[params["is_valid"]], format=inpt.format, canvas_size=inpt.canvas_size),
)
if params["needs_pad"]:
fill = self._fill[type(inpt)]
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
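# Hedged follow-up sketch: load the saved model back and compare two made-up
# sentences; `util.cos_sim` is the public cosine-similarity helper.
from sentence_transformers import util

loaded_model = SentenceTransformer(final_output_dir)
embeddings = loaded_model.encode(["A man is eating food.", "A man is eating a piece of bread."])
print(util.cos_sim(embeddings[0], embeddings[1]))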
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embedding files.
"""
import torch
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = 'output/training_stsbenchmark_avg_word_embeddings-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if the dataset exists. If not, download and extract it
sts_dataset_path = 'datasets/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file('glove.6B.300d.txt.gz')
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) #10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
model.evaluate(test_evaluator)
|
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the prefix (LMDB and Memcache are not yet supported)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/objects365_val.json',
metric='bbox',
sort_categories=True,
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
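# Hedged usage sketch (this would live in a separate driver script, not in the
# config file itself; the path below is an assumption): mmengine's Config
# machinery is what consumes dataset configs like the one above.
from mmengine.config import Config

cfg = Config.fromfile('configs/_base_/datasets/objects365v1_detection.py')
print(cfg.train_dataloader.dataset.ann_file)  # annotations/objects365_train.json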
|
# dataset settings
dataset_type = 'Objects365V1Dataset'
data_root = 'data/Objects365/Obj365_v1/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_train.json',
data_prefix=dict(img='train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/objects365_val.json',
data_prefix=dict(img='val/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/objects365_val.json',
metric='bbox',
sort_categories=True,
format_only=False)
test_evaluator = val_evaluator
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
    * A non-seekable file-like object can be used when loading.
* No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
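# Hedged round-trip sketch: these private helpers stream a checkpoint through
# any non-seekable file-like object; io.BytesIO stands in for a socket here.
import io

_demo_buffer = io.BytesIO()
_streaming_save({"weight": torch.randn(2, 2)}, _demo_buffer)
_demo_buffer.seek(0)
print(_streaming_load(_demo_buffer))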
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
    * A non-seekable file-like object can be used when loading.
    * No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
|
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
from torch import nn
from torch.func import jacrev, vmap
from torch.nn.functional import mse_loss
sigma = 0.5
epsilon = 4.0
def lennard_jones(r):
return epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
training_size = 1000
r = torch.linspace(0.5, 2 * sigma, steps=training_size, requires_grad=True)
# Create a bunch of vectors that point along positive-x
drs = torch.outer(r, torch.tensor([1.0, 0, 0]))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
# Create training energies
training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
# Create training forces along the dr direction vectors
training_forces = torch.stack(
[force * dr for force, dr in zip(map(lennard_jones_force, norms), drs)]
)
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1),
)
def make_prediction(model, drs):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return (
mse_loss(energies, predicted_energies)
+ 0.01 * mse_loss(forces, predicted_forces) / 3
)
optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(400):
optimiser.zero_grad()
energies, forces = make_prediction(model, drs)
loss = loss_fn(training_energies, training_forces, energies, forces)
loss.backward(retain_graph=True)
optimiser.step()
if epoch % 20 == 0:
print(loss.cpu().item())
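# Hedged sanity check: compare the trained network's energy against the
# analytic Lennard-Jones energy on a few held-out separations (the values
# below are arbitrary).
test_r = torch.linspace(0.6, 0.9, steps=5).reshape(-1, 1)
with torch.no_grad():
    predicted_energies = model(test_r)
print((predicted_energies - lennard_jones(test_r)).abs().max().item())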
|
# This example was adapted from https://github.com/muhrin/milad
# It is licensed under the GPLv3 license. You can find a copy of it
# here: https://www.gnu.org/licenses/gpl-3.0.en.html .
import torch
from torch import nn
from torch.func import jacrev, vmap
from torch.nn.functional import mse_loss
sigma = 0.5
epsilon = 4.0
def lennard_jones(r):
return epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6)
def lennard_jones_force(r):
"""Get magnitude of LJ force"""
return -epsilon * ((-12 * sigma**12 / r**13) + (6 * sigma**6 / r**7))
training_size = 1000
r = torch.linspace(0.5, 2 * sigma, steps=training_size, requires_grad=True)
# Create a bunch of vectors that point along positive-x
drs = torch.outer(r, torch.tensor([1.0, 0, 0]))
norms = torch.norm(drs, dim=1).reshape(-1, 1)
# Create training energies
training_energies = torch.stack(list(map(lennard_jones, norms))).reshape(-1, 1)
# Create training forces along the dr direction vectors
training_forces = torch.stack(
[force * dr for force, dr in zip(map(lennard_jones_force, norms), drs)]
)
model = nn.Sequential(
nn.Linear(1, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 16),
nn.Tanh(),
nn.Linear(16, 1),
)
def make_prediction(model, drs):
norms = torch.norm(drs, dim=1).reshape(-1, 1)
energies = model(norms)
network_derivs = vmap(jacrev(model))(norms).squeeze(-1)
forces = -network_derivs * drs / norms
return energies, forces
def loss_fn(energies, forces, predicted_energies, predicted_forces):
return (
mse_loss(energies, predicted_energies)
+ 0.01 * mse_loss(forces, predicted_forces) / 3
)
optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(400):
optimiser.zero_grad()
energies, forces = make_prediction(model, drs)
loss = loss_fn(training_energies, training_forces, energies, forces)
loss.backward(retain_graph=True)
optimiser.step()
if epoch % 20 == 0:
print(loss.cpu().item())
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
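# Hedged sketch: a minimal concrete metric built on the base class above. The
# layout of `predictions` (dicts carrying 'pred_label'/'gt_label') is an
# assumption for illustration, not part of the BaseMetric contract.
class SimpleAccuracy(BaseMetric):
    default_prefix = 'demo'

    def process(self, data_batch, predictions):
        for pred in predictions:
            self.results.append(int(pred['pred_label'] == pred['gt_label']))

    def compute_metrics(self, results):
        return {'accuracy': sum(results) / max(len(results), 1)}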
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from abc import ABCMeta, abstractmethod
from typing import Any, List, Optional, Sequence, Tuple, Union
from mmengine.dist import (broadcast_object_list, collect_results,
is_main_process)
class BaseMetric(metaclass=ABCMeta):
"""Base class for a metric.
The metric first processes each batch of data_samples and predictions,
and appends the processed results to the results list. Then it
collects all results together from all ranks if distributed training
is used. Finally, it computes the metrics of the entire dataset.
    A subclass of :class:`BaseMetric` should assign a meaningful value to the
class attribute `default_prefix`. See the argument `prefix` for details.
Args:
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
"""
default_prefix: Optional[str] = None
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
self._dataset_meta: Union[None, dict] = None
self.collect_device = collect_device
self.results: List[Any] = []
self.prefix = prefix or self.default_prefix
if self.prefix is None:
warnings.warn('The prefix is not set in metric class '
f'{self.__class__.__name__}.')
@property
def dataset_meta(self) -> Optional[dict]:
return self._dataset_meta
@dataset_meta.setter
def dataset_meta(self, dataset_meta: dict) -> None:
self._dataset_meta = dataset_meta
@abstractmethod
def process(self, data_batch: Sequence[Tuple[Any, dict]],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[Tuple[Any, dict]]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
@abstractmethod
def compute_metrics(self, results: list) -> dict:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: The computed metrics. The keys are the names of the metrics,
and the values are corresponding results.
"""
def evaluate(self, size: int) -> dict:
"""Evaluate the model performance of the whole dataset after processing
all batches.
Args:
size (int): Length of the entire validation dataset. When batch
size > 1, the dataloader may pad some data samples to make
sure all ranks have the same length of dataset slice. The
``collect_results`` function will drop the padded data based on
this size.
Returns:
dict: Evaluation metrics dict on the val dataset. The keys are the
names of the metrics, and the values are corresponding results.
"""
if len(self.results) == 0:
warnings.warn(
f'{self.__class__.__name__} got empty `self.results`. Please '
'ensure that the processed results are properly added into '
'`self.results` in `process` method.')
results = collect_results(self.results, size, self.collect_device)
if is_main_process():
_metrics = self.compute_metrics(results) # type: ignore
# Add prefix to metric names
if self.prefix:
_metrics = {
'/'.join((self.prefix, k)): v
for k, v in _metrics.items()
}
metrics = [_metrics]
else:
metrics = [None] # type: ignore
broadcast_object_list(metrics)
# reset the results list
self.results.clear()
return metrics[0]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
from __future__ import annotations
from langchain_core.language_models import LanguageModelLike
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike
from langchain_core.runnables import RunnableBranch
def create_history_aware_retriever(
llm: LanguageModelLike,
retriever: RetrieverLike,
prompt: BasePromptTemplate,
) -> RetrieverOutputLike:
"""Create a chain that takes conversation history and returns documents.
If there is no `chat_history`, then the `input` is just passed directly to the
retriever. If there is `chat_history`, then the prompt and LLM will be used
to generate a search query. That search query is then passed to the retriever.
Args:
llm: Language model to use for generating a search term given chat history
retriever: RetrieverLike object that takes a string as input and outputs
a list of Documents.
prompt: The prompt used to generate the search query for the retriever.
Returns:
        An LCEL Runnable. The runnable input must take in `input`, and, if there
        is chat history, it should be provided as `chat_history`.
        The Runnable output is a list of Documents.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import create_history_aware_retriever
from langchain import hub
rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
llm = ChatOpenAI()
retriever = ...
chat_retriever_chain = create_history_aware_retriever(
llm, retriever, rephrase_prompt
)
chain.invoke({"input": "...", "chat_history": })
"""
if "input" not in prompt.input_variables:
msg = (
"Expected `input` to be a prompt variable, "
f"but got {prompt.input_variables}"
)
raise ValueError(msg)
retrieve_documents: RetrieverOutputLike = RunnableBranch(
(
# Both empty string and empty list evaluate to False
lambda x: not x.get("chat_history", False),
# If no chat history, then we just pass input to retriever
(lambda x: x["input"]) | retriever,
),
# If chat history, then we pass inputs to LLM chain, then to retriever
prompt | llm | StrOutputParser() | retriever,
).with_config(run_name="chat_retriever_chain")
return retrieve_documents
|
from __future__ import annotations
from langchain_core.language_models import LanguageModelLike
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.retrievers import RetrieverLike, RetrieverOutputLike
from langchain_core.runnables import RunnableBranch
def create_history_aware_retriever(
llm: LanguageModelLike,
retriever: RetrieverLike,
prompt: BasePromptTemplate,
) -> RetrieverOutputLike:
"""Create a chain that takes conversation history and returns documents.
If there is no `chat_history`, then the `input` is just passed directly to the
retriever. If there is `chat_history`, then the prompt and LLM will be used
to generate a search query. That search query is then passed to the retriever.
Args:
llm: Language model to use for generating a search term given chat history
retriever: RetrieverLike object that takes a string as input and outputs
a list of Documents.
prompt: The prompt used to generate the search query for the retriever.
Returns:
        An LCEL Runnable. The runnable input must take in `input`, and, if there
        is chat history, it should be provided as `chat_history`.
        The Runnable output is a list of Documents.
Example:
.. code-block:: python
# pip install -U langchain langchain-community
from langchain_community.chat_models import ChatOpenAI
from langchain.chains import create_history_aware_retriever
from langchain import hub
rephrase_prompt = hub.pull("langchain-ai/chat-langchain-rephrase")
llm = ChatOpenAI()
retriever = ...
chat_retriever_chain = create_history_aware_retriever(
llm, retriever, rephrase_prompt
)
chain.invoke({"input": "...", "chat_history": })
"""
if "input" not in prompt.input_variables:
raise ValueError(
"Expected `input` to be a prompt variable, "
f"but got {prompt.input_variables}"
)
retrieve_documents: RetrieverOutputLike = RunnableBranch(
(
# Both empty string and empty list evaluate to False
lambda x: not x.get("chat_history", False),
# If no chat history, then we just pass input to retriever
(lambda x: x["input"]) | retriever,
),
# If chat history, then we pass inputs to LLM chain, then to retriever
prompt | llm | StrOutputParser() | retriever,
).with_config(run_name="chat_retriever_chain")
return retrieve_documents
|
from . import InputExample
import csv
import os
class TripletReader(object):
"""Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
data = csv.reader(
open(os.path.join(self.dataset_folder, filename), encoding="utf-8"),
delimiter=self.delimiter,
quoting=self.quoting,
)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
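# Hedged usage sketch: assumes a tab-separated 'triplets.tsv' with
# anchor/positive/negative columns inside a 'datasets/' folder.
reader = TripletReader('datasets')
train_examples = reader.get_examples('triplets.tsv', max_examples=1000)
print(len(train_examples))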
|
from . import InputExample
import csv
import os
class TripletReader(object):
"""
    Reads in a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
""" """
data = csv.reader(
open(os.path.join(self.dataset_folder, filename), encoding="utf-8"),
delimiter=self.delimiter,
quoting=self.quoting,
)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor] = None
url: Optional[ImageUrl] = None
bytes: Optional[ImageBytes] = None
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
T = TypeVar('T', bound='ImageTorchTensor')
@_register_proto(proto_type_name='image_torch_tensor')
class ImageTorchTensor(AbstractImageTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of [`TorchTensor`][docarray.typing.TorchTensor], to represent an image tensor.
Adds image-specific features to the tensor.
    For instance, the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTorchTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTorchTensor] = None
url: Optional[ImageUrl] = None
bytes: Optional[ImageBytes] = None
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
import numpy as np
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class FunctionalTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_convolve_numerics(self, leading_dims, lengths, mode):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.convolve(x, y, mode=mode)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy(), mode=mode)
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
["full", "valid", "same"],
)
def test_fftconvolve_numerics(self, leading_dims, lengths, mode):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.fftconvolve(x, y, mode=mode)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1, mode=mode)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
@nested_params(
[F.convolve, F.fftconvolve],
[(4, 3, 1, 2), (1,)],
[(10, 4), (2, 2, 2)],
)
def test_convolve_input_leading_dim_check(self, fn, x_shape, y_shape):
"""Check that convolve properly rejects inputs with different leading dimensions."""
x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)
y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)
with self.assertRaisesRegex(ValueError, "Leading dimensions"):
fn(x, y)
def test_add_noise_broadcast(self):
"""Check that add_noise produces correct outputs when broadcasting input dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(5, 1, 1, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(5, 1, 3, dtype=self.dtype, device=self.device)
snr = torch.rand(1, 1, 1, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
noise_expanded = noise.expand(*leading_dims, L)
snr_expanded = snr.expand(*leading_dims)
lengths_expanded = lengths.expand(*leading_dims)
expected = F.add_noise(waveform, noise_expanded, lengths_expanded, snr_expanded)
self.assertEqual(expected, actual)
@parameterized.expand(
[((5, 2, 3), (2, 1, 1), (5, 2), (5, 2, 3)), ((2, 1), (5,), (5,), (5,)), ((3,), (5, 2, 3), (2, 1, 1), (5, 2))]
)
def test_add_noise_leading_dim_check(self, waveform_dims, noise_dims, lengths_dims, snr_dims):
"""Check that add_noise properly rejects inputs with different leading dimension lengths."""
L = 51
waveform = torch.rand(*waveform_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*noise_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*lengths_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*snr_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Input leading dimensions"):
F.add_noise(waveform, noise, lengths, snr)
def test_add_noise_length_check(self):
"""Check that add_noise properly rejects inputs that have inconsistent length dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, 50, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Length dimensions"):
F.add_noise(waveform, noise, lengths, snr)
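# Hedged standalone sketch of the op exercised above: fftconvolve with an
# explicit `mode`, outside the test harness (shapes are arbitrary).
if __name__ == "__main__":
    x = torch.rand(3, 100)
    y = torch.rand(3, 10)
    print(F.fftconvolve(x, y, mode="same").shape)  # torch.Size([3, 100])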
|
import numpy as np
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class FunctionalTestImpl(TestBaseMixin):
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_convolve_numerics(self, leading_dims, lengths):
"""Check that convolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.convolve(x, y)
num_signals = torch.tensor(leading_dims).prod() if leading_dims else 1
x_reshaped = x.reshape((num_signals, L_x))
y_reshaped = y.reshape((num_signals, L_y))
expected = [
signal.convolve(x_reshaped[i].detach().cpu().numpy(), y_reshaped[i].detach().cpu().numpy())
for i in range(num_signals)
]
expected = torch.tensor(np.array(expected))
expected = expected.reshape(leading_dims + (-1,))
self.assertEqual(expected, actual)
@nested_params(
[(10, 4), (4, 3, 1, 2), (2,), ()],
[(100, 43), (21, 45)],
)
def test_fftconvolve_numerics(self, leading_dims, lengths):
"""Check that fftconvolve returns values identical to those that SciPy produces."""
L_x, L_y = lengths
x = torch.rand(*(leading_dims + (L_x,)), dtype=self.dtype, device=self.device)
y = torch.rand(*(leading_dims + (L_y,)), dtype=self.dtype, device=self.device)
actual = F.fftconvolve(x, y)
expected = signal.fftconvolve(x.detach().cpu().numpy(), y.detach().cpu().numpy(), axes=-1)
expected = torch.tensor(expected)
self.assertEqual(expected, actual)
@nested_params(
[F.convolve, F.fftconvolve],
[(4, 3, 1, 2), (1,)],
[(10, 4), (2, 2, 2)],
)
def test_convolve_input_leading_dim_check(self, fn, x_shape, y_shape):
"""Check that convolve properly rejects inputs with different leading dimensions."""
x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)
y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)
with self.assertRaisesRegex(ValueError, "Leading dimensions"):
fn(x, y)
def test_add_noise_broadcast(self):
"""Check that add_noise produces correct outputs when broadcasting input dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(5, 1, 1, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(5, 1, 3, dtype=self.dtype, device=self.device)
snr = torch.rand(1, 1, 1, dtype=self.dtype, device=self.device) * 10
actual = F.add_noise(waveform, noise, lengths, snr)
noise_expanded = noise.expand(*leading_dims, L)
snr_expanded = snr.expand(*leading_dims)
lengths_expanded = lengths.expand(*leading_dims)
expected = F.add_noise(waveform, noise_expanded, lengths_expanded, snr_expanded)
self.assertEqual(expected, actual)
@parameterized.expand(
[((5, 2, 3), (2, 1, 1), (5, 2), (5, 2, 3)), ((2, 1), (5,), (5,), (5,)), ((3,), (5, 2, 3), (2, 1, 1), (5, 2))]
)
def test_add_noise_leading_dim_check(self, waveform_dims, noise_dims, lengths_dims, snr_dims):
"""Check that add_noise properly rejects inputs with different leading dimension lengths."""
L = 51
waveform = torch.rand(*waveform_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*noise_dims, L, dtype=self.dtype, device=self.device)
lengths = torch.rand(*lengths_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*snr_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Input leading dimensions"):
F.add_noise(waveform, noise, lengths, snr)
def test_add_noise_length_check(self):
"""Check that add_noise properly rejects inputs that have inconsistent length dimensions."""
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device)
noise = torch.rand(*leading_dims, 50, dtype=self.dtype, device=self.device)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device) * 10
with self.assertRaisesRegex(ValueError, "Length dimensions"):
F.add_noise(waveform, noise, lengths, snr)
|
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray, AudioTensor
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray, VideoTensor
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageTensor',
'AudioTensor',
'VideoTensor',
'ImageNdArray',
'ImageBytes',
'VideoBytes',
'AudioBytes',
]
_torch_tensors = [
'TorchTensor',
'TorchEmbedding',
'ImageTorchTensor',
'AudioTorchTensor',
'VideoTorchTensor',
]
_tf_tensors = [
'TensorFlowTensor',
'TensorFlowEmbedding',
'ImageTensorFlowTensor',
'AudioTensorFlowTensor',
'VideoTensorFlowTensor',
]
__all_test__ = __all__ + _torch_tensors
def __getattr__(name: str):
if name in _torch_tensors:
import_library('torch', raise_error=True)
elif name in _tf_tensors:
import_library('tensorflow', raise_error=True)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
import docarray.typing.tensor
tensor_cls = getattr(docarray.typing.tensor, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
from typing_extensions import TYPE_CHECKING
from docarray.typing.bytes import AudioBytes, ImageBytes, VideoBytes
from docarray.typing.id import ID
from docarray.typing.tensor import ImageNdArray, ImageTensor
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTensorFlowTensor # noqa: F401
from docarray.typing.tensor.audio import AudioTorchTensor # noqa: F401
from docarray.typing.tensor.embedding import TensorFlowEmbedding # noqa: F401
from docarray.typing.tensor.image import ImageTensorFlowTensor # noqa: F401
from docarray.typing.tensor.image import ImageTorchTensor # noqa: F401
from docarray.typing.tensor.video import VideoTensorFlowTensor # noqa: F401
from docarray.typing.tensor.video import VideoTorchTensor # noqa: F401
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'ImageTensor',
'ImageNdArray',
'ImageBytes',
'VideoBytes',
'AudioBytes',
]
_torch_tensors = [
'TorchTensor',
'TorchEmbedding',
'ImageTorchTensor',
'AudioTorchTensor',
'VideoTorchTensor',
]
_tf_tensors = [
'TensorFlowTensor',
'TensorFlowEmbedding',
'ImageTensorFlowTensor',
'AudioTensorFlowTensor',
'VideoTensorFlowTensor',
]
__all_test__ = __all__ + _torch_tensors
def __getattr__(name: str):
if name in _torch_tensors:
import_library('torch', raise_error=True)
elif name in _tf_tensors:
import_library('tensorflow', raise_error=True)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
import docarray.typing.tensor
tensor_cls = getattr(docarray.typing.tensor, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
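if __name__ == '__main__':
    # Hedged usage sketch, not part of the original file: computing the
    # distillation loss on random student/teacher logits. Assumes mmcv and
    # torch are installed; the shapes below are illustrative only.
    import torch

    pred = torch.randn(4, 81)        # student logits, shape (N, n + 1)
    soft_label = torch.randn(4, 81)  # teacher logits, same shape
    loss_fn = KnowledgeDistillationKLDivLoss(reduction='mean', T=10)
    print(loss_fn(pred, soft_label))  # scalar loss tensor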
|
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Remove soft_label from automatic
            differentiation. Defaults to True.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from pathlib import Path
import pytest
import librosa
import numpy as np
from jina import Document, DocumentArray, Executor
from jina.excepts import BadDocType
from ...audio_clip_encoder import AudioCLIPEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path.endswith('AudioCLIP-Full-Training.pt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.wav')
)
docs = DocumentArray([Document(blob=x_audio, tags={'sample_rate': sample_rate})])
model = AudioCLIPEncoder()
model.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
assert docs[0].tags['sample_rate'] == AudioCLIPEncoder.TARGET_SAMPLE_RATE
def test_many_documents():
audio1, sample_rate1 = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.mp3')
)
audio2, sample_rate2 = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.wav')
)
docs = DocumentArray(
[
Document(blob=audio1, tags={'sample_rate': sample_rate1}),
Document(blob=audio2, tags={'sample_rate': sample_rate2}),
]
)
encoder = AudioCLIPEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
assert docs[0].tags['sample_rate'] == AudioCLIPEncoder.TARGET_SAMPLE_RATE
assert docs[1].embedding.shape == (1024,)
assert docs[1].tags['sample_rate'] == AudioCLIPEncoder.TARGET_SAMPLE_RATE
def test_traversal_paths():
audio1, sample_rate1 = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.mp3')
)
audio2, sample_rate2 = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.wav')
)
audio1_chunks = np.split(audio1, 4)
audio2_chunks = np.split(audio2, 2)
docs = DocumentArray(
[
Document(
id='root1',
blob=audio1,
tags={'sample_rate': sample_rate1},
chunks=[
Document(
id=f'chunk1{i}', blob=chunk, tags={'sample_rate': sample_rate1}
)
for i, chunk in enumerate(audio1_chunks)
],
),
Document(
id='root2',
blob=audio2,
tags={'sample_rate': sample_rate2},
chunks=[
Document(
id='chunk21',
blob=audio2_chunks[0],
tags={'sample_rate': sample_rate2},
),
Document(
id='chunk22',
blob=audio2_chunks[1],
tags={'sample_rate': sample_rate2},
chunks=[
Document(
id=f'chunk22{i}',
blob=chunk,
tags={'sample_rate': sample_rate2},
)
for i, chunk in enumerate(np.split(audio2_chunks[1], 3))
],
),
],
),
]
)
encoder = AudioCLIPEncoder(default_traversal_paths=['c'])
encoder.encode(docs, parameters={})
encoder.encode(docs, parameters={'traversal_paths': ['cc']})
for path, count in [['r', 0], ['c', 6], ['cc', 3]]:
embeddings = [
embedding
for embedding in DocumentArray(docs)
.traverse_flat([path])
.get_attributes('embedding')
if embedding is not None
]
sample_rates = {
doc.tags['sample_rate'] for doc in DocumentArray(docs).traverse_flat([path])
}
assert all(embedding.shape == (1024,) for embedding in embeddings)
assert len(embeddings) == count
if path != 'r':
assert (
len(sample_rates) == 1
and sample_rates.pop() == AudioCLIPEncoder.TARGET_SAMPLE_RATE
)
def test_no_sample_rate():
audio, sample_rate = librosa.load(
str(Path(__file__).parents[1] / 'test_data/sample.mp3')
)
docs = DocumentArray([Document(blob=audio)])
encoder = AudioCLIPEncoder()
with pytest.raises(
BadDocType, match='sample rate is not given, please provide a valid sample rate'
):
encoder.encode(docs, parameters={})
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import librosa
import numpy as np
from jina import Document, DocumentArray, Executor
from ...audio_clip_encoder import AudioCLIPEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.model_path.endswith('AudioCLIP-Full-Training.pt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(
Path(__file__).parents[1] / 'test_data/sample.wav'
)
docs = DocumentArray([Document(blob=x_audio)])
model = AudioCLIPEncoder()
model.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
def test_many_documents():
audio1, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.mp3')
audio2, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.wav')
docs = DocumentArray([Document(blob=audio1), Document(blob=audio2)])
encoder = AudioCLIPEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding.shape == (1024,)
assert docs[1].embedding.shape == (1024,)
def test_traversal_paths():
audio1, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.mp3')
audio2, _ = librosa.load(Path(__file__).parents[1] / 'test_data/sample.wav')
audio1_chunks = np.split(audio1, 4)
audio2_chunks = np.split(audio2, 2)
docs = DocumentArray(
[
Document(
id='root1',
blob=audio1,
chunks=[
Document(id=f'chunk1{i}', blob=chunk)
for i, chunk in enumerate(audio1_chunks)
],
),
Document(
id='root2',
blob=audio2,
chunks=[
Document(id='chunk21', blob=audio2_chunks[0]),
Document(
id='chunk22',
blob=audio2_chunks[1],
chunks=[
Document(id=f'chunk22{i}', blob=chunk)
for i, chunk in enumerate(np.split(audio2_chunks[1], 3))
],
),
],
),
]
)
encoder = AudioCLIPEncoder(default_traversal_paths=['c'])
encoder.encode(docs, parameters={})
encoder.encode(docs, parameters={'traversal_paths': ['cc']})
for path, count in [['r', 0], ['c', 6], ['cc', 3]]:
embeddings = (
DocumentArray(docs).traverse_flat([path]).get_attributes('embedding')
)
assert all(embedding.shape == (1024,) for embedding in embeddings)
assert len(embeddings) == count
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
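class MeanNormEvaluator(SentenceEvaluator):
    """Hedged example of extending the base class, as its docstring suggests.
    The metric computed here (mean embedding norm) is purely illustrative and
    is not an evaluator shipped with sentence-transformers."""

    def __init__(self, sentences: list[str], name: str = ""):
        super().__init__()
        self.sentences = sentences
        self.name = name
        self.primary_metric = "mean_embedding_norm"

    def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        import numpy as np

        embeddings = model.encode(self.sentences)
        metrics = {"mean_embedding_norm": float(np.linalg.norm(embeddings, axis=1).mean())}
        return self.prefix_name_to_metrics(metrics, self.name)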
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
if not name:
return metrics
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError, not IndexError
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
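# Hedged usage note, not part of the original config: within an MMDetection
# checkout, a config like this one is normally passed to the stock training
# entry point, e.g.
#   python tools/train.py configs/yolox/yolox_tiny_8x8_300e_coco.py
# The exact file path above is an assumption inferred from the `_base_` name.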
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
random_size_range=(10, 20),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(416, 416),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
train_dataset = dict(pipeline=train_pipeline)
data = dict(
train=train_dataset,
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* [:footcite:`Mir2015QUESST2014EQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
assert subset in ["docs", "dev", "eval"], "`subset` must be one of ['docs', 'dev', 'eval']"
assert language is None or language in _LANGUAGES, f"`language` must be None or one of {str(_LANGUAGES)}"
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
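if __name__ == "__main__":
    # Hedged usage sketch, not part of the original file: load the "dev"
    # subset restricted to English queries. "./data" is a hypothetical root;
    # pass download=True on first use to fetch the archive.
    dataset = QUESST14("./data", subset="dev", language="nnenglish", download=True)
    waveform, sample_rate, file_name = dataset[0]
    print(len(dataset), waveform.shape, sample_rate, file_name)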
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* [:footcite:`Mir2015QUESST2014EQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
assert subset in ["docs", "dev", "eval"], "`subset` must be one of ['docs', 'dev', 'eval']"
assert language is None or language in _LANGUAGES, f"`language` must be None or one of {str(_LANGUAGES)}"
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
    def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
    def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
cur_dir = os.path.dirname(os.path.abspath(__file__))
qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {qdrant_yml} up -d --remove-orphans")
time.sleep(1)
yield
os.system(f"docker-compose -f {qdrant_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_collection_name():
return uuid.uuid4().hex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
for collection in client.get_collections().collections:
client.delete_collection(collection.name)
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
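def test_fixture_starts_clean(qdrant: qdrant_client.QdrantClient):
    # Hedged example, not part of the original conftest: the `qdrant` fixture
    # deletes every existing collection, so a freshly injected client should
    # report none.
    assert qdrant.get_collections().collections == []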
|
import os
import time
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
cur_dir = os.path.dirname(os.path.abspath(__file__))
qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker-compose -f {qdrant_yml} up -d --remove-orphans")
time.sleep(1)
yield
os.system(f"docker-compose -f {qdrant_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_collection_name():
return uuid.uuid4().hex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
for collection in client.get_collections().collections:
client.delete_collection(collection.name)
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
    def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCoSENTLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
    def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCoSENTLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
with LocalCUDACluster(n_workers=2) as cluster:
with Client(cluster) as client:
args = get_rabit_args(client, 2)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from dask_cuda import LocalCUDACluster
from distributed import Client
import xgboost as xgb
from xgboost import dask as dxgb
from xgboost.testing.dask import check_external_memory
@pytest.mark.parametrize("is_qdm", [True, False])
def test_external_memory(is_qdm: bool) -> None:
n_workers = 2
with LocalCUDACluster(n_workers=2) as cluster:
with Client(cluster) as client:
args = client.sync(
dxgb._get_rabit_args,
2,
None,
client,
)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cuda",
comm_args=args,
is_qdm=is_qdm,
)
client.gather(futs)
|
"""
Computes embeddings
"""
import numpy as np
from sentence_transformers import SentenceTransformer
def test_encode_token_embeddings(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
"""
Test that encode(output_value='token_embeddings') works
"""
model = paraphrase_distilroberta_base_v1_model
sent = [
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
"Sentences",
"Sentence five five five five five five five",
]
emb = model.encode(sent, output_value="token_embeddings", batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(model.tokenize([s])["input_ids"][0]) == e.shape[0]
def test_encode_single_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Single sentence
emb = model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Single sentence as list
emb = model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.002
# Sentence list
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.007
def test_encode_normalize(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
emb = model.encode(
[
"Hello Word, a test sentence",
"Here comes another sentence",
"My final sentence",
],
normalize_embeddings=True,
)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(paraphrase_distilroberta_base_v1_model: SentenceTransformer) -> None:
model = paraphrase_distilroberta_base_v1_model
# Input a sentence tuple
emb = model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.002
# List of sentence tuples
emb = model.encode(
[
("Hello Word, a test sentence", "Second input for model"),
("My second tuple", "With two inputs"),
("Final tuple", "final test"),
]
)
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.002
|
"""
Computes embeddings
"""
import unittest
from sentence_transformers import SentenceTransformer
import numpy as np
class ComputeEmbeddingsTest(unittest.TestCase):
def setUp(self):
self.model = SentenceTransformer('paraphrase-distilroberta-base-v1')
def test_encode_token_embeddings(self):
"""
Test that encode(output_value='token_embeddings') works
:return:
"""
sent = ["Hello Word, a test sentence", "Here comes another sentence", "My final sentence", "Sentences", "Sentence five five five five five five five"]
emb = self.model.encode(sent, output_value='token_embeddings', batch_size=2)
assert len(emb) == len(sent)
for s, e in zip(sent, emb):
assert len(self.model.tokenize([s])['input_ids'][0]) == e.shape[0]
def test_encode_single_sentences(self):
#Single sentence
emb = self.model.encode("Hello Word, a test sentence")
assert emb.shape == (768,)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Single sentence as list
emb = self.model.encode(["Hello Word, a test sentence"])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 7.9811716) < 0.001
# Sentence list
emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"])
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 22.968266) < 0.001
def test_encode_normalize(self):
emb = self.model.encode(["Hello Word, a test sentence", "Here comes another sentence", "My final sentence"], normalize_embeddings=True)
assert emb.shape == (3, 768)
for norm in np.linalg.norm(emb, axis=1):
assert abs(norm - 1) < 0.001
def test_encode_tuple_sentences(self):
# Input a sentence tuple
emb = self.model.encode([("Hello Word, a test sentence", "Second input for model")])
assert emb.shape == (1, 768)
assert abs(np.sum(emb) - 9.503508) < 0.001
# List of sentence tuples
emb = self.model.encode([("Hello Word, a test sentence", "Second input for model"), ("My second tuple", "With two inputs"), ("Final tuple", "final test")])
assert emb.shape == (3, 768)
assert abs(np.sum(emb) - 32.14627) < 0.001
|