input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
"""base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
class MultiModalRetriever(BaseRetriever, BaseImageRetriever):
"""Multi Modal base retriever."""
@abstractmethod
def text_retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""
Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given image query.
Implemented by the user.
"""
@abstractmethod
async def atext_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def atext_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def aimage_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given image query.
Implemented by the user.
"""
|
"""base multi modal retriever."""
from abc import abstractmethod
from typing import List
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.image_retriever import BaseImageRetriever
from llama_index.core.indices.query.schema import QueryType
from llama_index.core.schema import NodeWithScore
class MultiModalRetriever(BaseRetriever, BaseImageRetriever):
"""Multi Modal base retriever."""
@abstractmethod
def text_retrieve(self, str_or_query_bundle: QueryType) -> List[NodeWithScore]:
"""
Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Retrieve image nodes given image query.
Implemented by the user.
"""
@abstractmethod
async def atext_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve text nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def atext_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given text query.
Implemented by the user.
"""
@abstractmethod
async def aimage_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""
Async Retrieve image nodes given image query.
Implemented by the user.
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, oid_challenge_classes,
oid_v6_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import INSTANCE_OFFSET
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes',
'oid_challenge_classes', 'INSTANCE_OFFSET'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall'
]
|
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box import BoxReaderBase
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_ai_response_from_box_files,
)
from box_sdk_gen import BoxClient, File
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
logger = logging.getLogger(__name__)
class BoxReaderAIPrompt(BoxReaderBase):
"""
A reader class for loading data from Box files using a custom AI prompt.
This class inherits from the `BaseReader` class and allows specifying a
custom AI prompt for data extraction. It utilizes the provided BoxClient object
to interact with the Box API and extracts data based on the prompt.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReaderAIPrompt"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
ai_prompt: str,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
individual_document_prompt: bool = True,
) -> List[Document]:
"""
Extracts data from Box files using a custom AI prompt and creates Document objects.
This method utilizes a user-provided AI prompt to potentially extract
more specific data from the Box files compared to pre-configured AI
services like Box AI Extract. It then creates Document objects containing
the extracted data along with file metadata.
Args:
ai_prompt (str): The custom AI prompt that specifies what data to
extract from the files.
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract data from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract data from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts data from sub-folders within the specified folder.
Defaults to False.
individual_document_prompt (bool, optional): If True, applies the
provided AI prompt to each document individually. If False,
all documents are used for context to the answer.
Defaults to True.
Returns:
List[Document]: A list of Document objects containing the extracted
data and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get box files information
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_ai_response_from_box_files(
box_client=self._box_client,
box_files=box_files,
ai_prompt=ai_prompt,
individual_document_prompt=individual_document_prompt,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.ai_response if file.ai_response else ""
doc.metadata["ai_prompt"] = file.ai_prompt
doc.metadata["ai_response"] = file.ai_response if file.ai_response else ""
docs.append(doc)
return docs
def load_resource(self, box_file_id: str, ai_prompt: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
resource (str): The resource identifier.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id], ai_prompt=ai_prompt)
|
import logging
from typing import List, Optional
from llama_index.core.schema import Document
from llama_index.readers.box import BoxReaderBase
from llama_index.readers.box.BoxAPI.box_api import (
box_check_connection,
get_box_files_details,
get_box_folder_files_details,
get_ai_response_from_box_files,
)
from box_sdk_gen import BoxClient, File
from llama_index.readers.box.BoxAPI.box_llama_adaptors import box_file_to_llama_document
logger = logging.getLogger(__name__)
class BoxReaderAIPrompt(BoxReaderBase):
"""
A reader class for loading data from Box files using a custom AI prompt.
This class inherits from the `BaseReader` class and allows specifying a
custom AI prompt for data extraction. It utilizes the provided BoxClient object
to interact with the Box API and extracts data based on the prompt.
Attributes:
_box_client (BoxClient): An authenticated Box client object used
for interacting with the Box API.
"""
_box_client: BoxClient
@classmethod
def class_name(cls) -> str:
return "BoxReaderAIPrompt"
def __init__(self, box_client: BoxClient):
super().__init__(box_client=box_client)
# def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
def load_data(
self,
ai_prompt: str,
file_ids: Optional[List[str]] = None,
folder_id: Optional[str] = None,
is_recursive: bool = False,
individual_document_prompt: bool = True,
) -> List[Document]:
"""
Extracts data from Box files using a custom AI prompt and creates Document objects.
This method utilizes a user-provided AI prompt to potentially extract
more specific data from the Box files compared to pre-configured AI
services like Box AI Extract. It then creates Document objects containing
the extracted data along with file metadata.
Args:
ai_prompt (str): The custom AI prompt that specifies what data to
extract from the files.
file_ids (Optional[List[str]], optional): A list of Box file IDs
to extract data from. If provided, folder_id is ignored.
Defaults to None.
folder_id (Optional[str], optional): The ID of the Box folder to
extract data from. If provided, along with is_recursive set to
True, retrieves data from sub-folders as well. Defaults to None.
is_recursive (bool, optional): If True and folder_id is provided,
extracts data from sub-folders within the specified folder.
Defaults to False.
individual_document_prompt (bool, optional): If True, applies the
provided AI prompt to each document individually. If False,
all documents are used for context to the answer.
Defaults to True.
Returns:
List[Document]: A list of Document objects containing the extracted
data and file metadata.
"""
# Connect to Box
box_check_connection(self._box_client)
docs: List[Document] = []
box_files: List[File] = []
# get box files information
if file_ids is not None:
box_files.extend(
get_box_files_details(box_client=self._box_client, file_ids=file_ids)
)
elif folder_id is not None:
box_files.extend(
get_box_folder_files_details(
box_client=self._box_client,
folder_id=folder_id,
is_recursive=is_recursive,
)
)
box_files = get_ai_response_from_box_files(
box_client=self._box_client,
box_files=box_files,
ai_prompt=ai_prompt,
individual_document_prompt=individual_document_prompt,
)
for file in box_files:
doc = box_file_to_llama_document(file)
doc.text = file.ai_response if file.ai_response else ""
doc.metadata["ai_prompt"] = file.ai_prompt
doc.metadata["ai_response"] = file.ai_response if file.ai_response else ""
docs.append(doc)
return docs
def load_resource(self, box_file_id: str, ai_prompt: str) -> List[Document]:
"""
Load data from a specific resource.
Args:
resource (str): The resource identifier.
Returns:
List[Document]: A list of documents loaded from the resource.
"""
return self.load_data(file_ids=[box_file_id], ai_prompt=ai_prompt)
|
from .hnswlib_searcher import HnswlibSearcher
|
from .hnswlib_searcher import HnswlibSearcher
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "uint8", "ubinary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using FAISS
rescore_multiplier = 4
results, search_time = semantic_search_faiss(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_faiss
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "uint8", "ubinary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using FAISS
rescore_multiplier = 4
results, search_time = semantic_search_faiss(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
import strawberry
from fastapi import FastAPI
from strawberry.fastapi import GraphQLRouter
@strawberry.type
class User:
name: str
age: int
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(name="Patrick", age=100)
schema = strawberry.Schema(query=Query)
graphql_app = GraphQLRouter(schema)
app = FastAPI()
app.include_router(graphql_app, prefix="/graphql")
|
import strawberry
from fastapi import FastAPI
from strawberry.asgi import GraphQL
@strawberry.type
class User:
name: str
age: int
@strawberry.type
class Query:
@strawberry.field
def user(self) -> User:
return User(name="Patrick", age=100)
schema = strawberry.Schema(query=Query)
graphql_app = GraphQL(schema)
app = FastAPI()
app.add_route("/graphql", graphql_app)
app.add_websocket_route("/graphql", graphql_app)
|
import warnings
from sys import platform
from typing import Optional
import torch
import torchaudio
dict_format = {
torch.uint8: "u8",
torch.int16: "s16",
torch.int32: "s32",
torch.int64: "s64",
torch.float32: "flt",
torch.float64: "dbl",
}
@torchaudio._extension.fail_if_no_ffmpeg
def play_audio(
waveform: torch.Tensor,
sample_rate: Optional[float],
device: Optional[str] = None,
) -> None:
"""Plays audio through specified or available output device.
.. warning::
This function is currently only supported on MacOS, and requires
libavdevice (FFmpeg) with ``audiotoolbox`` output device.
.. note::
This function can play up to two audio channels.
Args:
waveform: Tensor containing the audio to play.
Expected shape: `(time, num_channels)`.
sample_rate: Sample rate of the audio to play.
device: Output device to use. If None, the default device is used.
"""
if platform == "darwin":
device = device or "audiotoolbox"
path = "-"
else:
raise ValueError(f"This function only supports MacOS, but current OS is {platform}")
available_devices = list(torchaudio.utils.ffmpeg_utils.get_output_devices().keys())
if device not in available_devices:
raise ValueError(f"Device {device} is not available. Available devices are: {available_devices}")
if waveform.dtype not in dict_format:
raise ValueError(f"Unsupported type {waveform.dtype}. The list of supported types is: {dict_format.keys()}")
format = dict_format[waveform.dtype]
if waveform.ndim != 2:
raise ValueError(f"Expected 2D tensor with shape `(time, num_channels)`, got {waveform.ndim}D tensor instead")
time, num_channels = waveform.size()
if num_channels > 2:
warnings.warn(
f"Expected up to 2 channels, got {num_channels} channels instead. Only the first 2 channels will be played."
)
# Write to speaker device
s = torchaudio.io.StreamWriter(dst=path, format=device)
s.add_audio_stream(sample_rate, num_channels, format=format)
# write audio to the device
block_size = 256
with s.open():
for i in range(0, time, block_size):
s.write_audio_chunk(0, waveform[i : i + block_size, :])
|
import warnings
from sys import platform
from typing import Optional
import torch
import torchaudio
from torchaudio.io import StreamWriter
dict_format = {
torch.uint8: "u8",
torch.int16: "s16",
torch.int32: "s32",
torch.int64: "s64",
torch.float32: "flt",
torch.float64: "dbl",
}
@torchaudio._extension.fail_if_no_ffmpeg
def play_audio(
waveform: torch.Tensor,
sample_rate: Optional[float],
device: Optional[str] = None,
) -> None:
"""Plays audio through specified or available output device.
This function is currently only supported on MacOS, which has access
to "audiotoolbox" output device that can play up to two audio channels.
Args:
waveform: Tensor containing the audio to play.
Expected shape: `(time, num_channels)`.
sample_rate: Sample rate of the audio to play.
device: Output device to use. If None, the default device is used.
"""
if platform == "darwin":
device = device or "audiotoolbox"
path = "-"
else:
raise ValueError(f"This function only supports MacOS, but current OS is {platform}")
available_devices = list(torchaudio.utils.ffmpeg_utils.get_output_devices().keys())
if device not in available_devices:
raise ValueError(f"Device {device} is not available. Available devices are: {available_devices}")
if waveform.dtype not in dict_format:
raise ValueError(f"Unsupported type {waveform.dtype}. The list of supported types is: {dict_format.keys()}")
format = dict_format[waveform.dtype]
if waveform.ndim != 2:
raise ValueError(f"Expected 2D tensor with shape `(time, num_channels)`, got {waveform.ndim}D tensor instead")
time, num_channels = waveform.size()
if num_channels > 2:
warnings.warn(
f"Expected up to 2 channels, got {num_channels} channels instead. Only the first 2 channels will be played."
)
# Write to speaker device
s = StreamWriter(dst=path, format=device)
s.add_audio_stream(sample_rate, num_channels, format=format)
# write audio to the device
block_size = 256
with s.open():
for i in range(0, time, block_size):
s.write_audio_chunk(0, waveform[i : i + block_size, :])
|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.servers import BaseServer
try:
total_time = 0
total_success = 0
timeout = args.timeout / 1000 if args.timeout != -1 else None
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=timeout)
else:
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = BaseServer.is_ready(
ctrl_address=f'{hostname}:{port}',
timeout=timeout,
protocol=protocol,
)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
|
import argparse
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
timeout = args.timeout / 1000 if args.timeout != -1 else None
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(
ctrl_address=f'{hostname}:{port}',
timeout=timeout,
)
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
timeout=timeout,
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
|
from langchain_core.tracers.log_stream import (
LogEntry,
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
RunState,
)
__all__ = ["LogEntry", "LogStreamCallbackHandler", "RunLog", "RunLogPatch", "RunState"]
|
from langchain_core.tracers.log_stream import (
LogEntry,
LogStreamCallbackHandler,
RunLog,
RunLogPatch,
RunState,
)
__all__ = ["LogEntry", "RunState", "RunLogPatch", "RunLog", "LogStreamCallbackHandler"]
|
_base_ = './retinanet_r50-caffe_fpn_ms-1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './retinanet_r50_caffe_fpn_mstrain_1x_coco.py'
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate policy
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax import tensorboard
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import name_scope
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.jax import core
from keras.src.backend.jax import distribution_lib
from keras.src.backend.jax import image
from keras.src.backend.jax import linalg
from keras.src.backend.jax import math
from keras.src.backend.jax import nn
from keras.src.backend.jax import numpy
from keras.src.backend.jax import random
from keras.src.backend.jax import tensorboard
from keras.src.backend.jax.core import IS_THREAD_SAFE
from keras.src.backend.jax.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.jax.core import Variable
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import compute_output_spec
from keras.src.backend.jax.core import cond
from keras.src.backend.jax.core import convert_to_numpy
from keras.src.backend.jax.core import convert_to_tensor
from keras.src.backend.jax.core import device_scope
from keras.src.backend.jax.core import is_tensor
from keras.src.backend.jax.core import random_seed_dtype
from keras.src.backend.jax.core import scatter
from keras.src.backend.jax.core import shape
from keras.src.backend.jax.core import stop_gradient
from keras.src.backend.jax.core import vectorized_map
from keras.src.backend.jax.rnn import cudnn_ok
from keras.src.backend.jax.rnn import gru
from keras.src.backend.jax.rnn import lstm
from keras.src.backend.jax.rnn import rnn
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader:
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
from __future__ import annotations
import gzip
from . import InputExample
class PairedFilesReader:
"""Reads in the a Pair Dataset, split in two files"""
def __init__(self, filepaths):
self.filepaths = filepaths
def get_examples(self, max_examples=0):
fIns = []
for filepath in self.filepaths:
fIn = (
gzip.open(filepath, "rt", encoding="utf-8")
if filepath.endswith(".gz")
else open(filepath, encoding="utf-8")
)
fIns.append(fIn)
examples = []
eof = False
while not eof:
texts = []
for fIn in fIns:
text = fIn.readline()
if text == "":
eof = True
break
texts.append(text)
if eof:
break
examples.append(InputExample(guid=str(len(examples)), texts=texts, label=1))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
"""Anyscale embeddings wrapper."""
from __future__ import annotations
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "thenlper/gte-large"
class AnyscaleEmbeddings(OpenAIEmbeddings):
"""`Anyscale` Embeddings API."""
anyscale_api_key: Optional[SecretStr] = Field(default=None)
"""AnyScale Endpoints API keys."""
model: str = Field(default=DEFAULT_MODEL)
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests."""
tiktoken_enabled: bool = False
"""Set this to False for non-OpenAI implementations of the embeddings API"""
embedding_ctx_length: int = 500
"""The maximum number of tokens to embed at once."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"anyscale_api_key": "ANYSCALE_API_KEY",
}
@pre_init
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
)
values["anyscale_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility.
client_params = {
"api_key": values["anyscale_api_key"].get_secret_value(),
"base_url": values["anyscale_api_base"],
}
values["client"] = openai.OpenAI(**client_params).embeddings
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
values["client"] = openai.Embedding
return values
@property
def _llm_type(self) -> str:
return "anyscale-embedding"
|
"""Anyscale embeddings wrapper."""
from __future__ import annotations
from typing import Dict, Optional
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env, pre_init
from pydantic import Field, SecretStr
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.utils.openai import is_openai_v1
DEFAULT_API_BASE = "https://api.endpoints.anyscale.com/v1"
DEFAULT_MODEL = "thenlper/gte-large"
class AnyscaleEmbeddings(OpenAIEmbeddings):
"""`Anyscale` Embeddings API."""
anyscale_api_key: Optional[SecretStr] = Field(default=None)
"""AnyScale Endpoints API keys."""
model: str = Field(default=DEFAULT_MODEL)
"""Model name to use."""
anyscale_api_base: str = Field(default=DEFAULT_API_BASE)
"""Base URL path for API requests."""
tiktoken_enabled: bool = False
"""Set this to False for non-OpenAI implementations of the embeddings API"""
embedding_ctx_length: int = 500
"""The maximum number of tokens to embed at once."""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"anyscale_api_key": "ANYSCALE_API_KEY",
}
@pre_init
def validate_environment(cls, values: dict) -> dict:
"""Validate that api key and python package exists in environment."""
values["anyscale_api_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"anyscale_api_key",
"ANYSCALE_API_KEY",
)
)
values["anyscale_api_base"] = get_from_dict_or_env(
values,
"anyscale_api_base",
"ANYSCALE_API_BASE",
default=DEFAULT_API_BASE,
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
if is_openai_v1():
# For backwards compatibility.
client_params = {
"api_key": values["anyscale_api_key"].get_secret_value(),
"base_url": values["anyscale_api_base"],
}
values["client"] = openai.OpenAI(**client_params).embeddings
else:
values["openai_api_base"] = values["anyscale_api_base"]
values["openai_api_key"] = values["anyscale_api_key"].get_secret_value()
values["client"] = openai.Embedding # type: ignore[attr-defined]
return values
@property
def _llm_type(self) -> str:
return "anyscale-embedding"
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import-untyped]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type]
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type]
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_box, query_spatial_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = fill
self._fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBox) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBox is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_spatial_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_box(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, spatial_size = F.crop_bounding_box(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_box(bounding_boxes, format=format, spatial_size=spatial_size)
height_and_width = F.convert_format_bounding_box(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBox):
inpt = datapoints.BoundingBox.wrap_like(
inpt,
F.clamp_bounding_box(inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size),
)
if params["needs_pad"]:
fill = self._fill[type(inpt)]
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
from typing import Any, Dict, List, Optional, Sequence, Type, Union
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.prototype.datapoints import Label, OneHotLabel
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import _setup_fill_arg, _setup_size
from torchvision.transforms.v2.utils import has_any, is_simple_tensor, query_bounding_box, query_spatial_size
class FixedSizeCrop(Transform):
def __init__(
self,
size: Union[int, Sequence[int]],
fill: Union[datapoints._FillType, Dict[Type, datapoints._FillType]] = 0,
padding_mode: str = "constant",
) -> None:
super().__init__()
size = tuple(_setup_size(size, error_msg="Please provide only two dimensions (h, w) for size."))
self.crop_height = size[0]
self.crop_width = size[1]
self.fill = _setup_fill_arg(fill)
self.padding_mode = padding_mode
def _check_inputs(self, flat_inputs: List[Any]) -> None:
if not has_any(
flat_inputs,
PIL.Image.Image,
datapoints.Image,
is_simple_tensor,
datapoints.Video,
):
raise TypeError(
f"{type(self).__name__}() requires input sample to contain an tensor or PIL image or a Video."
)
if has_any(flat_inputs, datapoints.BoundingBox) and not has_any(flat_inputs, Label, OneHotLabel):
raise TypeError(
f"If a BoundingBox is contained in the input sample, "
f"{type(self).__name__}() also requires it to contain a Label or OneHotLabel."
)
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
height, width = query_spatial_size(flat_inputs)
new_height = min(height, self.crop_height)
new_width = min(width, self.crop_width)
needs_crop = new_height != height or new_width != width
offset_height = max(height - self.crop_height, 0)
offset_width = max(width - self.crop_width, 0)
r = torch.rand(1)
top = int(offset_height * r)
left = int(offset_width * r)
bounding_boxes: Optional[torch.Tensor]
try:
bounding_boxes = query_bounding_box(flat_inputs)
except ValueError:
bounding_boxes = None
if needs_crop and bounding_boxes is not None:
format = bounding_boxes.format
bounding_boxes, spatial_size = F.crop_bounding_box(
bounding_boxes.as_subclass(torch.Tensor),
format=format,
top=top,
left=left,
height=new_height,
width=new_width,
)
bounding_boxes = F.clamp_bounding_box(bounding_boxes, format=format, spatial_size=spatial_size)
height_and_width = F.convert_format_bounding_box(
bounding_boxes, old_format=format, new_format=datapoints.BoundingBoxFormat.XYWH
)[..., 2:]
is_valid = torch.all(height_and_width > 0, dim=-1)
else:
is_valid = None
pad_bottom = max(self.crop_height - new_height, 0)
pad_right = max(self.crop_width - new_width, 0)
needs_pad = pad_bottom != 0 or pad_right != 0
return dict(
needs_crop=needs_crop,
top=top,
left=left,
height=new_height,
width=new_width,
is_valid=is_valid,
padding=[0, 0, pad_right, pad_bottom],
needs_pad=needs_pad,
)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
if params["needs_crop"]:
inpt = F.crop(
inpt,
top=params["top"],
left=params["left"],
height=params["height"],
width=params["width"],
)
if params["is_valid"] is not None:
if isinstance(inpt, (Label, OneHotLabel, datapoints.Mask)):
inpt = inpt.wrap_like(inpt, inpt[params["is_valid"]]) # type: ignore[arg-type]
elif isinstance(inpt, datapoints.BoundingBox):
inpt = datapoints.BoundingBox.wrap_like(
inpt,
F.clamp_bounding_box(inpt[params["is_valid"]], format=inpt.format, spatial_size=inpt.spatial_size),
)
if params["needs_pad"]:
fill = self.fill[type(inpt)]
inpt = F.pad(inpt, params["padding"], fill=fill, padding_mode=self.padding_mode)
return inpt
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[dict], optional): Data from dataloader.
Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._do_before_epoch = before_epoch
self._do_after_epoch = after_epoch
self._do_after_iter = after_iter
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_iter:
torch.cuda.empty_cache()
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_before_epoch:
torch.cuda.empty_cache()
def _after_epoch(self, runner, mode: str = 'train') -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
if self._do_after_epoch:
torch.cuda.empty_cache()
|
from typing import Any, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
Raised when trying to use the dataset viewer HTTP API and when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
- unavailable /parquet or /info responses
"""
def get_exported_parquet_files(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> list[dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == commit_hash or commit_hash is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (commit_hash='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> dict[str, dict[str, Any]]:
"""
Get the dataset information, can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == commit_hash or commit_hash is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (commit_hash='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
|
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
Raised when trying to use the dataset viewer HTTP API and when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
- unavailable /parquet or /info responses
"""
def get_exported_parquet_files(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == commit_hash or commit_hash is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (commit_hash='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == commit_hash or commit_hash is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (commit_hash='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
|
import pytest
import tensorflow as tf
from keras.src import backend
from keras.src.backend.tensorflow import random
from keras.src.testing import TestCase
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Only applies to TensorFlow random ops.",
)
class TFRandomTest(TestCase):
def test_categorical(self):
inputs = tf.ones([2, 3], dtype="float32")
outputs = random.categorical(inputs, 2, seed=42)
if tf.config.list_physical_devices("GPU"):
expected = tf.constant([[2, 2], [0, 1]])
else:
expected = tf.constant([[0, 2], [1, 0]])
self.assertAllClose(outputs, expected)
def test_categorical_seed_cast(self):
inputs = tf.ones([2, 3], dtype="float32")
seed = tf.int32.max + 1000
outputs_mod = random.categorical(inputs, 2, seed=seed)
outputs_nomod = random.categorical(inputs, 2, seed=1001)
self.assertAllClose(outputs_mod, outputs_nomod)
|
import pytest
import tensorflow as tf
from keras.src import backend
from keras.src.backend.tensorflow import random
from keras.src.testing import TestCase
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="Only applies to TensorFlow random ops.",
)
class TFRandomTest(TestCase):
def test_categorical(self):
inputs = tf.ones([2, 3], dtype="float32")
outputs = random.categorical(inputs, 2, seed=42)
expected = tf.constant([[0, 2], [1, 0]])
self.assertAllClose(outputs, expected)
def test_categorical_seed_cast(self):
inputs = tf.ones([2, 3], dtype="float32")
seed = tf.int32.max + 1000
outputs_mod = random.categorical(inputs, 2, seed=seed)
outputs_nomod = random.categorical(inputs, 2, seed=1001)
self.assertAllClose(outputs_mod, outputs_nomod)
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sklearn.cluster import AgglomerativeClustering
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"A man is eating pasta.",
"The girl is carrying a baby.",
"The baby is carried by the woman",
"A man is riding a horse.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah is running behind its prey.",
"A cheetah chases prey on across a field.",
]
corpus_embeddings = embedder.encode(corpus)
# Some models don't automatically normalize the embeddings, in which case you should normalize the embeddings:
# corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform agglomerative clustering
clustering_model = AgglomerativeClustering(
n_clusters=None, distance_threshold=1.5
) # , affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i + 1)
print(cluster)
print("")
|
"""
This is a simple application for sentence embeddings: clustering
Sentences are mapped to sentence embeddings and then agglomerative clustering with a threshold is applied.
"""
from sentence_transformers import SentenceTransformer
from sklearn.cluster import AgglomerativeClustering
import numpy as np
embedder = SentenceTransformer('all-MiniLM-L6-v2')
# Corpus with example sentences
corpus = ['A man is eating food.',
'A man is eating a piece of bread.',
'A man is eating pasta.',
'The girl is carrying a baby.',
'The baby is carried by the woman',
'A man is riding a horse.',
'A man is riding a white horse on an enclosed ground.',
'A monkey is playing drums.',
'Someone in a gorilla costume is playing a set of drums.',
'A cheetah is running behind its prey.',
'A cheetah chases prey on across a field.'
]
corpus_embeddings = embedder.encode(corpus)
# Normalize the embeddings to unit length
corpus_embeddings = corpus_embeddings / np.linalg.norm(corpus_embeddings, axis=1, keepdims=True)
# Perform kmean clustering
clustering_model = AgglomerativeClustering(n_clusters=None, distance_threshold=1.5) #, affinity='cosine', linkage='average', distance_threshold=0.4)
clustering_model.fit(corpus_embeddings)
cluster_assignment = clustering_model.labels_
clustered_sentences = {}
for sentence_id, cluster_id in enumerate(cluster_assignment):
if cluster_id not in clustered_sentences:
clustered_sentences[cluster_id] = []
clustered_sentences[cluster_id].append(corpus[sentence_id])
for i, cluster in clustered_sentences.items():
print("Cluster ", i+1)
print(cluster)
print("")
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.6.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray import DocList
from docarray.base_doc.doc import BaseDocWithoutId
def test_doc_list():
class A(BaseDocWithoutId):
text: str
cls_doc_list = DocList[A]
assert isinstance(cls_doc_list, type)
|
from docarray import DocList
from docarray.base_doc.doc import BaseDocWithoutId
def test_doc_list():
class A(BaseDocWithoutId):
text: str
cls_doc_list = DocList[A]
assert isinstance(cls_doc_list, type)
|
"""Intercom reader."""
import json
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class IntercomReader(BaseReader):
"""
Intercom reader. Reads data from a Intercom workspace.
Args:
personal_access_token (str): Intercom token.
"""
def __init__(self, intercom_access_token: str) -> None:
"""Initialize Intercom reader."""
self.intercom_access_token = intercom_access_token
def load_data(self) -> List[Document]:
"""
Load data from the workspace.
Args:
workspace_id (str): Workspace ID.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup
results = []
articles = self.get_all_articles()
for article in articles:
body = article["body"]
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
extra_info = {
"id": article["id"],
"title": article["title"],
"url": article["url"],
"updated_at": article["updated_at"],
}
results.append(
Document(
text=body,
extra_info=extra_info or {},
)
)
return results
def get_all_articles(self):
articles = []
next_page = None
while True:
response = self.get_articles_page(next_page)
articles.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return articles
def get_articles_page(self, next_page: str = None):
import requests
if next_page is None:
url = "https://api.intercom.io/articles"
else:
url = next_page
headers = {
"accept": "application/json",
"Intercom-Version": "2.8",
"authorization": f"Bearer {self.intercom_access_token}",
}
response = requests.get(url, headers=headers)
response_json = json.loads(response.text)
next_page = response_json.get("pages", {}).get("next", None)
articles = response_json.get("data", [])
return {"articles": articles, "next_page": next_page}
|
"""Intercom reader."""
import json
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class IntercomReader(BaseReader):
"""Intercom reader. Reads data from a Intercom workspace.
Args:
personal_access_token (str): Intercom token.
"""
def __init__(self, intercom_access_token: str) -> None:
"""Initialize Intercom reader."""
self.intercom_access_token = intercom_access_token
def load_data(self) -> List[Document]:
"""Load data from the workspace.
Args:
workspace_id (str): Workspace ID.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup
results = []
articles = self.get_all_articles()
for article in articles:
body = article["body"]
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
extra_info = {
"id": article["id"],
"title": article["title"],
"url": article["url"],
"updated_at": article["updated_at"],
}
results.append(
Document(
text=body,
extra_info=extra_info or {},
)
)
return results
def get_all_articles(self):
articles = []
next_page = None
while True:
response = self.get_articles_page(next_page)
articles.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return articles
def get_articles_page(self, next_page: str = None):
import requests
if next_page is None:
url = "https://api.intercom.io/articles"
else:
url = next_page
headers = {
"accept": "application/json",
"Intercom-Version": "2.8",
"authorization": f"Bearer {self.intercom_access_token}",
}
response = requests.get(url, headers=headers)
response_json = json.loads(response.text)
next_page = response_json.get("pages", {}).get("next", None)
articles = response_json.get("data", [])
return {"articles": articles, "next_page": next_page}
|
import time
import http.client
import json
from typing import List, Optional, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
class GalaxiaClient:
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int,
wait_time: int,
):
self.api_url = api_url
self.api_key = api_key
self.knowledge_base_id = knowledge_base_id
self.n_retries = n_retries
self.wait_time = wait_time
self.headers = {"X-Api-Key": api_key, "Content-Type": "application/json"}
def initialize(
self,
conn: http.client.HTTPSConnection,
question: str,
) -> dict:
payload_0 = '{\n "algorithmVersion":"%s",\n' % self.knowledge_base_id
payload_1 = ' "text":"%s" \n}' % question.replace('"', '\\"')
payload = payload_0 + payload_1
conn.request("POST", "/analyze/initialize", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def check_status(
self,
conn: http.client.HTTPSConnection,
init_res: dict,
) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/status", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_result(self, conn: http.client.HTTPSConnection, init_res: dict) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/result", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def retrieve(
self,
query: str,
) -> Union[dict, None]:
conn = http.client.HTTPSConnection(self.api_url)
flag_init = False
for i in range(self.n_retries):
init_res = self.initialize(conn, query)
if "operationId" in init_res:
flag_init = True
break
time.sleep(self.wait_time * i)
if not flag_init:
# failed to init
return None
flag_proc = False
for i in range(1, self.n_retries + 1):
time.sleep(self.wait_time * i)
status = self.check_status(conn, init_res)
if status["status"] == "processed":
flag_proc = True
break
if flag_proc:
res = self.get_result(conn, init_res)
return res["result"]["resultItems"]
else:
# failed to process
return None
class GalaxiaRetriever(BaseRetriever):
"""
Galaxia knowledge retriever.
before using the API create your knowledge base here:
beta.cloud.smabbler.com/
learn more here:
https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag
Args:
api_url : url of galaxia API, e.g. "https://beta.api.smabbler.com"
api_key : API key
knowledge_base_id : ID of the knowledge base (galaxia model)
Example:
.. code-block:: python
from llama_index.retrievers.galaxia import GalaxiaRetriever
from llama_index.core.schema import QueryBundle
retriever = GalaxiaRetriever(
api_url="beta.api.smabbler.com",
api_key="<key>",
knowledge_base_id="<knowledge_base_id>",
)
result = retriever._retrieve(QueryBundle(
"<test question>"
))
print(result)
"""
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int = 20,
wait_time: int = 2,
callback_manager: Optional[CallbackManager] = None,
):
self._client = GalaxiaClient(
api_url, api_key, knowledge_base_id, n_retries, wait_time
)
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(query)
if response is None:
return []
node_with_score = []
for res in response:
node_with_score.append(
NodeWithScore(
node=TextNode(
text=res["category"],
metadata={
"model": res["model"],
"file": res["group"],
},
),
score=res["rank"],
)
)
return node_with_score
|
import time
import http.client
import json
from typing import List, Optional, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
class GalaxiaClient:
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int,
wait_time: int,
):
self.api_url = api_url
self.api_key = api_key
self.knowledge_base_id = knowledge_base_id
self.n_retries = n_retries
self.wait_time = wait_time
self.headers = {"X-Api-Key": api_key, "Content-Type": "application/json"}
def initialize(
self,
conn: http.client.HTTPSConnection,
question: str,
) -> dict:
payload_0 = '{\n "algorithmVersion":"%s",\n' % self.knowledge_base_id
payload_1 = ' "text":"%s" \n}' % question.replace('"', '\\"')
payload = payload_0 + payload_1
conn.request("POST", "/analyze/initialize", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def check_status(
self,
conn: http.client.HTTPSConnection,
init_res: dict,
) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/status", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_result(self, conn: http.client.HTTPSConnection, init_res: dict) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/result", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def retrieve(
self,
query: str,
) -> Union[dict, None]:
conn = http.client.HTTPSConnection(self.api_url)
flag_init = False
for i in range(self.n_retries):
init_res = self.initialize(conn, query)
if "operationId" in init_res:
flag_init = True
break
time.sleep(self.wait_time * i)
if not flag_init:
# failed to init
return None
flag_proc = False
for i in range(1, self.n_retries + 1):
time.sleep(self.wait_time * i)
status = self.check_status(conn, init_res)
if status["status"] == "processed":
flag_proc = True
break
if flag_proc:
res = self.get_result(conn, init_res)
return res["result"]["resultItems"]
else:
# failed to process
return None
class GalaxiaRetriever(BaseRetriever):
"""Galaxia knowledge retriever.
before using the API create your knowledge base here:
beta.cloud.smabbler.com/
learn more here:
https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag
Args:
api_url : url of galaxia API, e.g. "https://beta.api.smabbler.com"
api_key : API key
knowledge_base_id : ID of the knowledge base (galaxia model)
Example:
.. code-block:: python
from llama_index.retrievers.galaxia import GalaxiaRetriever
from llama_index.core.schema import QueryBundle
retriever = GalaxiaRetriever(
api_url="beta.api.smabbler.com",
api_key="<key>",
knowledge_base_id="<knowledge_base_id>",
)
result = retriever._retrieve(QueryBundle(
"<test question>"
))
print(result)
"""
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int = 20,
wait_time: int = 2,
callback_manager: Optional[CallbackManager] = None,
):
self._client = GalaxiaClient(
api_url, api_key, knowledge_base_id, n_retries, wait_time
)
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(query)
if response is None:
return []
node_with_score = []
for res in response:
node_with_score.append(
NodeWithScore(
node=TextNode(
text=res["category"],
metadata={
"model": res["model"],
"file": res["group"],
},
),
score=res["rank"],
)
)
return node_with_score
|
from dataclasses import dataclass, fields
import pytest
from sklearn.base import (
BaseEstimator,
RegressorMixin,
TransformerMixin,
)
from sklearn.utils import Tags, get_tags
from sklearn.utils.estimator_checks import (
check_estimator_tags_renamed,
check_valid_tag_types,
)
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class EmptyTransformer(TransformerMixin, BaseEstimator):
pass
class EmptyRegressor(RegressorMixin, BaseEstimator):
pass
@pytest.mark.filterwarnings("ignore:.*no __sklearn_tags__ attribute.*:FutureWarning")
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[EmptyTransformer(), False],
[EmptyRegressor(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
def test_no___sklearn_tags__with_more_tags():
"""Test that calling `get_tags` on a class that defines `_more_tags` but not
`__sklearn_tags__` raises an error.
"""
class MoreTagsEstimator(BaseEstimator):
def _more_tags(self):
return {"requires_y": True} # pragma: no cover
with pytest.raises(
TypeError, match="has defined either `_more_tags` or `_get_tags`"
):
check_estimator_tags_renamed("MoreTagsEstimator", MoreTagsEstimator())
def test_tag_test_passes_with_inheritance():
@dataclass
class MyTags(Tags):
my_tag: bool = True
class MyEstimator(BaseEstimator):
def __sklearn_tags__(self):
tags_orig = super().__sklearn_tags__()
as_dict = {
field.name: getattr(tags_orig, field.name)
for field in fields(tags_orig)
}
tags = MyTags(**as_dict)
tags.my_tag = True
return tags
check_valid_tag_types("MyEstimator", MyEstimator())
|
import pytest
from sklearn.base import (
BaseEstimator,
RegressorMixin,
TransformerMixin,
)
from sklearn.utils._tags import get_tags
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class EmptyTransformer(TransformerMixin, BaseEstimator):
pass
class EmptyRegressor(RegressorMixin, BaseEstimator):
pass
@pytest.mark.filterwarnings("ignore:.*no __sklearn_tags__ attribute.*:FutureWarning")
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[EmptyTransformer(), False],
[EmptyRegressor(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
from mmcv.cnn.bricks import build_plugin_layer
from torch import Tensor
from mmdet.core.utils.typing import OptConfigType
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Defaults to 'sum'.
pre_cfg (:obj:`ConfigDict` or dict): Specify pre-processing modules.
Defaults to None.
post_cfg (:obj:`ConfigDict` or dict): Specify post-processing modules.
Defaults to None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation: str = 'sum',
pre_cfg: OptConfigType = None,
post_cfg: OptConfigType = None,
**kwargs) -> None:
super().__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# some times rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if num_levels == 1:
return self.roi_layers[0](feats[0], rois)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.registry import MODELS
from .base_roi_extractor import BaseRoIExtractor
@MODELS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# some times rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_size_bounding_boxes,
get_size_image_tensor,
get_size_image_pil,
get_size_mask,
get_size_video,
get_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_boxes,
convert_format_bounding_boxes,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_boxes,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_boxes,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_boxes,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_boxes,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_boxes,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_boxes,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_boxes,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_boxes,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_boxes,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_boxes,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_boxes,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_boxes,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.registry import TRANSFORMS
@TRANSFORMS.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
heatmap_flag (bool): Whether to use heatmap guided. Default False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
# TODO: more essential bug need to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
ori_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(ori_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
heatmap_flag (bool): Whether to use heatmap guided. Default False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
# TODO: more essential bug need to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
ori_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(ori_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
|
#!/usr/bin/env python3
"""Trains a SentencePiece model on transcripts across LRS3 pretrain and trainval.
- `[lrs3_path]` is the directory path for the LRS3 cropped face dataset.
Example:
python train_spm.py --lrs3-path [lrs3_path]
"""
import io
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import sentencepiece as spm
def get_transcript_text(transcript_path):
return [open(transcript_path).read().splitlines()[0].lower()]
def get_transcripts(dataset_path):
transcript_paths = dataset_path.glob("*/*.txt")
merged_transcripts = []
for path in transcript_paths:
merged_transcripts += get_transcript_text(path)
return merged_transcripts
def train_spm(input):
model_writer = io.BytesIO()
spm.SentencePieceTrainer.train(
sentence_iterator=iter(input),
model_writer=model_writer,
vocab_size=1023,
model_type="unigram",
input_sentence_size=-1,
character_coverage=1.0,
bos_id=0,
pad_id=1,
eos_id=2,
unk_id=3,
)
return model_writer.getvalue()
def parse_args():
default_output_path = "./spm_unigram_1023.model"
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--lrs3-path",
type=pathlib.Path,
help="Path to LRS3 datasets.",
required=True,
)
parser.add_argument(
"--output-file",
default=pathlib.Path(default_output_path),
type=pathlib.Path,
help=f"File to save model to. (Default: '{default_output_path}')",
)
return parser.parse_args()
def run_cli():
args = parse_args()
root = args.lrs3_path / "LRS3_text_seg16s"
splits = ["pretrain", "trainval"]
merged_transcripts = []
for split in splits:
path = pathlib.Path(root) / split
merged_transcripts += get_transcripts(path)
model = train_spm(merged_transcripts)
with open(args.output_file, "wb") as f:
f.write(model)
if __name__ == "__main__":
run_cli()
|
#!/usr/bin/env python3
"""Trains a SentencePiece model on transcripts across LRS3 pretrain and trainval.
Example:
python train_spm.py --lrs3-path <LRS3-DIRECTORY>
"""
import io
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
import sentencepiece as spm
def get_transcript_text(transcript_path):
return [open(transcript_path).read().splitlines()[0].lower()]
def get_transcripts(dataset_path):
transcript_paths = dataset_path.glob("*/*.txt")
merged_transcripts = []
for path in transcript_paths:
merged_transcripts += get_transcript_text(path)
return merged_transcripts
def train_spm(input):
model_writer = io.BytesIO()
spm.SentencePieceTrainer.train(
sentence_iterator=iter(input),
model_writer=model_writer,
vocab_size=1023,
model_type="unigram",
input_sentence_size=-1,
character_coverage=1.0,
bos_id=0,
pad_id=1,
eos_id=2,
unk_id=3,
)
return model_writer.getvalue()
def parse_args():
default_output_path = "./spm_unigram_1023.model"
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument(
"--lrs3-path",
type=pathlib.Path,
help="Path to LRS3 datasets.",
required=True,
)
parser.add_argument(
"--output-file",
default=pathlib.Path(default_output_path),
type=pathlib.Path,
help=f"File to save model to. (Default: '{default_output_path}')",
)
return parser.parse_args()
def run_cli():
args = parse_args()
root = args.lrs3_path / "LRS3_text_seg24s"
splits = ["pretrain", "trainval"]
merged_transcripts = []
for split in splits:
path = pathlib.Path(root) / split
merged_transcripts += get_transcripts(path)
model = train_spm(merged_transcripts)
with open(args.output_file, "wb") as f:
f.write(model)
if __name__ == "__main__":
run_cli()
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
Encode an array of string in size `B` into an ndarray in size `B x D`
The ndarray potentially is BatchSize x (Channel x Height x Width)
Internally, :class:`TextPaddlehubEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
default_traversal_paths: Iterable[str] = ('r',),
default_batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
:param default_batch_size: fallback batch size in case there is not batch size sent in the request
:param default_traversal_paths: fallback traversal path in case there is not traversal path sent in the request
:param device: Device to be used. Use 'gpu' for GPU or use 'cpu' for CPU.
"""
super().__init__(*args, **kwargs)
self.device = device
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(
contents, use_gpu=self.device == 'gpu'
)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Optional, Tuple
import numpy as np
import paddlehub as hub
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
class TextPaddleEncoder(Executor):
"""
Encode an array of string in size `B` into an ndarray in size `B x D`
The ndarray potentially is BatchSize x (Channel x Height x Width)
Internally, :class:`TextPaddlehubEncoder` wraps the Ernie module from paddlehub.
https://github.com/PaddlePaddle/PaddleHub
For models' details refer to
https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
"""
def __init__(
self,
model_name: Optional[str] = 'ernie_tiny',
on_gpu: bool = False,
default_batch_size: int = 32,
default_traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
:param model_name: the name of the model. Supported models include
``ernie``, ``ernie_tiny``, ``ernie_v2_eng_base``, ``ernie_v2_eng_large``,
``bert_chinese_L-12_H-768_A-12``, ``bert_multi_cased_L-12_H-768_A-12``,
``bert_multi_uncased_L-12_H-768_A-12``, ``bert_uncased_L-12_H-768_A-12``,
``bert_uncased_L-24_H-1024_A-16``, ``chinese-bert-wwm``,
``chinese-bert-wwm-ext``, ``chinese-electra-base``,
``chinese-electra-small``, ``chinese-roberta-wwm-ext``,
``chinese-roberta-wwm-ext-large``, ``rbt3``, ``rbtl3``
:param on_gpu: If use gpu to get the output.
:param default_batch_size: fallback batch size in case there is not batch size sent in the request
:param default_traversal_paths: fallback traversal path in case there is not traversal path sent in the request
"""
super().__init__(*args, **kwargs)
self.on_gpu = on_gpu
self.model = hub.Module(name=model_name)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
@requests
def encode(self, docs: DocumentArray, parameters: Dict, **kwargs):
"""Encode doc content into vector representation.
:param docs: `DocumentArray` passed from the previous ``Executor``.
:param parameters: dictionary to define the `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional key value arguments.
"""
if docs:
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
for batch_of_docs in document_batches_generator:
pooled_features = []
contents = [[doc.content] for doc in batch_of_docs]
results = self.model.get_embedding(contents, use_gpu=self.on_gpu)
for pooled_feature, _ in results:
pooled_features.append(pooled_feature)
for doc, feature in zip(batch_of_docs, pooled_features):
doc.embedding = np.asarray(feature)
|
"""Run smoke tests"""
import argparse
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
def ffmpeg_test():
from torchaudio.io import StreamReader # noqa: F401
def main() -> None:
parser = argparse.ArgumentParser()
# Warning: Please note this option should not be widely used, only use it when absolutely necessary
parser.add_argument("--no-ffmpeg", dest="ffmpeg", action="store_false")
options = parser.parse_args()
if options.ffmpeg:
ffmpeg_test()
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import torchaudio # noqa: F401
import torchaudio.compliance.kaldi # noqa: F401
import torchaudio.datasets # noqa: F401
import torchaudio.functional # noqa: F401
import torchaudio.models # noqa: F401
import torchaudio.pipelines # noqa: F401
import torchaudio.sox_effects # noqa: F401
import torchaudio.transforms # noqa: F401
import torchaudio.utils # noqa: F401
from torchaudio.io import StreamReader # noqa: F401
|
"""Utility to lazily import modules."""
from __future__ import annotations
import importlib
from typing import Any, TYPE_CHECKING
class _LazyModule:
"""Lazily import a module."""
def __init__(self, module_name: str) -> None:
self._name = module_name
self._module: Any = None
def __repr__(self) -> str:
return f"<lazy module '{self._name}'>"
def __getattr__(self, attr: str) -> object:
if self._module is None:
self._module = importlib.import_module(".", self._name)
return getattr(self._module, attr)
# Import the following modules during type checking to enable code intelligence features,
# such as auto-completion in tools like pylance, even when these modules are not explicitly
# imported in user code.
# NOTE: Add additional used imports here.
if TYPE_CHECKING:
import onnx
import onnx_ir # type: ignore[import-untyped]
import onnxscript
import onnxscript._framework_apis.torch_2_8 as onnxscript_apis
onnxscript_ir = onnx_ir
else:
onnx = _LazyModule("onnx")
onnxscript = _LazyModule("onnxscript")
onnxscript_ir = _LazyModule("onnx_ir")
onnxscript_apis = _LazyModule("onnxscript._framework_apis.torch_2_8")
|
"""Utility to lazily import modules."""
from __future__ import annotations
import importlib
from typing import Any, TYPE_CHECKING
class _LazyModule:
"""Lazily import a module."""
def __init__(self, module_name: str) -> None:
self._name = module_name
self._module: Any = None
def __repr__(self) -> str:
return f"<lazy module '{self._name}'>"
def __getattr__(self, attr: str) -> object:
if self._module is None:
self._module = importlib.import_module(".", self._name)
return getattr(self._module, attr)
# Import the following modules during type checking to enable code intelligence features,
# such as auto-completion in tools like pylance, even when these modules are not explicitly
# imported in user code.
# NOTE: Add additional used imports here.
if TYPE_CHECKING:
import onnx
import onnxscript
import onnxscript._framework_apis.torch_2_7 as onnxscript_apis
onnxscript_ir = onnxscript.ir
else:
onnx = _LazyModule("onnx")
onnxscript = _LazyModule("onnxscript")
onnxscript_ir = _LazyModule("onnxscript.ir")
onnxscript_apis = _LazyModule("onnxscript._framework_apis.torch_2_7")
|
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
from llama_index.core.llms.mock import MockLLMWithNonyieldingChatStream
@pytest.fixture()
def nonyielding_llm() -> LLM:
return MockLLMWithNonyieldingChatStream()
@pytest.fixture()
def llm() -> LLM:
return MockLLM()
@pytest.fixture()
def prompt() -> str:
return "test prompt"
def test_llm_stream_chat_handles_nonyielding_stream(
nonyielding_llm: LLM, prompt: str
) -> None:
response = nonyielding_llm.stream_chat([ChatMessage(role="user", content=prompt)])
for _ in response:
pass
@pytest.mark.asyncio
async def test_llm_astream_chat_handles_nonyielding_stream(
nonyielding_llm: LLM, prompt: str
) -> None:
response = await nonyielding_llm.astream_chat(
[ChatMessage(role="user", content=prompt)]
)
async for _ in response:
pass
def test_llm_complete_prompt_arg(llm: LLM, prompt: str) -> None:
res = llm.complete(prompt)
expected_res_text = prompt
assert res.text == expected_res_text
def test_llm_complete_prompt_kwarg(llm: LLM, prompt: str) -> None:
res = llm.complete(prompt=prompt)
expected_res_text = prompt
assert res.text == expected_res_text
def test_llm_complete_throws_if_duplicate_prompt(llm: LLM, prompt: str) -> None:
with pytest.raises(TypeError):
llm.complete(prompt, prompt=prompt)
def test_llm_complete_throws_if_no_prompt(llm: LLM) -> None:
with pytest.raises(ValueError):
llm.complete()
def test_llm_stream_complete_prompt_arg(llm: LLM, prompt: str) -> None:
res_text = "".join(r.delta for r in llm.stream_complete(prompt))
expected_res_text = prompt
assert res_text == expected_res_text
def test_llm_stream_complete_prompt_kwarg(llm: LLM, prompt: str) -> None:
res_text = "".join(r.delta for r in llm.stream_complete(prompt=prompt))
expected_res_text = prompt
assert res_text == expected_res_text
def test_llm_stream_complete_throws_if_duplicate_prompt(llm: LLM, prompt: str) -> None:
with pytest.raises(TypeError):
llm.stream_complete(prompt, prompt=prompt)
def test_llm_stream_complete_throws_if_no_prompt(llm: LLM) -> None:
with pytest.raises(ValueError):
llm.stream_complete()
|
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
from llama_index.core.llms.mock import MockLLMWithNonyieldingChatStream
@pytest.fixture()
def nonyielding_llm() -> LLM:
return MockLLMWithNonyieldingChatStream()
@pytest.fixture()
def llm() -> LLM:
return MockLLM()
@pytest.fixture()
def prompt() -> str:
return "test prompt"
def test_llm_stream_chat_handles_nonyielding_stream(
nonyielding_llm: LLM, prompt: str
) -> None:
response = nonyielding_llm.stream_chat([ChatMessage(role="user", content=prompt)])
for _ in response:
pass
@pytest.mark.asyncio()
async def test_llm_astream_chat_handles_nonyielding_stream(
nonyielding_llm: LLM, prompt: str
) -> None:
response = await nonyielding_llm.astream_chat(
[ChatMessage(role="user", content=prompt)]
)
async for _ in response:
pass
def test_llm_complete_prompt_arg(llm: LLM, prompt: str) -> None:
res = llm.complete(prompt)
expected_res_text = prompt
assert res.text == expected_res_text
def test_llm_complete_prompt_kwarg(llm: LLM, prompt: str) -> None:
res = llm.complete(prompt=prompt)
expected_res_text = prompt
assert res.text == expected_res_text
def test_llm_complete_throws_if_duplicate_prompt(llm: LLM, prompt: str) -> None:
with pytest.raises(TypeError):
llm.complete(prompt, prompt=prompt)
def test_llm_complete_throws_if_no_prompt(llm: LLM) -> None:
with pytest.raises(ValueError):
llm.complete()
def test_llm_stream_complete_prompt_arg(llm: LLM, prompt: str) -> None:
res_text = "".join(r.delta for r in llm.stream_complete(prompt))
expected_res_text = prompt
assert res_text == expected_res_text
def test_llm_stream_complete_prompt_kwarg(llm: LLM, prompt: str) -> None:
res_text = "".join(r.delta for r in llm.stream_complete(prompt=prompt))
expected_res_text = prompt
assert res_text == expected_res_text
def test_llm_stream_complete_throws_if_duplicate_prompt(llm: LLM, prompt: str) -> None:
with pytest.raises(TypeError):
llm.stream_complete(prompt, prompt=prompt)
def test_llm_stream_complete_throws_if_no_prompt(llm: LLM) -> None:
with pytest.raises(ValueError):
llm.stream_complete()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss', 'MultiPosCrossEntropyLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss'
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
)
assert len(resp) == 50
for doc in resp:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
def test_spacy_text_encoder():
docs = DocumentArray(
[
Document(text='Han likes eating pizza'),
Document(text='Han likes pizza'),
Document(text='Jina rocks'),
]
)
f = Flow().add(uses=SpacyTextEncoder)
with f:
docs = f.post(on='/test', inputs=docs)
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from spacy_text_encoder import SpacyTextEncoder
_EMBEDDING_DIM = 96
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=SpacyTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
def test_spacy_text_encoder():
docs = DocumentArray(
[
Document(text='Han likes eating pizza'),
Document(text='Han likes pizza'),
Document(text='Jina rocks'),
]
)
f = Flow().add(uses=SpacyTextEncoder)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
docs = resp[0].docs
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96,)
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It compute the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned respectively.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrices that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseTripletLoss(model), lambda_corpus=3e-5, lambda_query=5e-5)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
from __future__ import annotations
from sentence_transformers.losses.TripletLoss import TripletDistanceMetric, TripletLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseTripletLoss(TripletLoss):
def __init__(
self, model: SparseEncoder, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It compute the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned respectively.
Args:
model: SparseEncoder
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrices that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.SparseTripletLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
super().__init__(model, distance_metric=distance_metric, triplet_margin=triplet_margin)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deteministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, lengths, snr))
assert gradgradcheck(add_noise, (waveform, noise, lengths, snr))
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(preemphasis, (waveform,))
assert gradgradcheck(preemphasis, (waveform,))
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(deemphasis, (waveform,))
assert gradgradcheck(deemphasis, (waveform,))
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
# replication_pad1d_backward_cuda is not deteministic and
# gives very small (~e-16) difference.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_AddNoise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, lengths, snr))
assert gradgradcheck(add_noise, (waveform, noise, lengths, snr))
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'elastic_transport': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using magic command
`get_ipython` that only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using magic command
`get_ipython` that only available in Jupyter.
:return: True if run in a Jupyter notebook else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'false')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(np.bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train", trust_remote_code=True)
corpus = dataset["answer"]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_video_torch_encoder():
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
f = Flow().add(
uses={
'jtype': 'CustomImageTorchEncoder',
'with': {
'model_state_dict_path': model_state_dict_path,
'layer_name': 'conv1',
'model_definition_file': os.path.join(
cur_dir, '../model/external_model.py'
),
'model_class_name': 'ExternalModel',
},
}
)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
assert resp[0].docs[0].embedding.shape == (10,)
assert resp[0].docs[1].embedding.shape == (10,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_video_torch_encoder():
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
f = Flow().add(
uses={
'jtype': 'CustomImageTorchEncoder',
'with': {
'model_state_dict_path': model_state_dict_path,
'layer_name': 'conv1',
'model_definition_file': os.path.join(
cur_dir, '../model/external_model.py'
),
'model_class_name': 'ExternalModel',
},
}
)
with f:
resp = f.post(on='/test', inputs=docs, return_results=True)
assert resp[0].docs[0].embedding.shape == (10,)
assert resp[0].docs[1].embedding.shape == (10,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.ndarray import NdArray
@_register_proto(proto_type_name='ndarray_embedding')
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
|
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.ndarray import NdArray
class NdArrayEmbedding(NdArray, EmbeddingMixin):
alternative_type = NdArray
|
import tempfile
import os
import time
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='module')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
|
import tempfile
import os
import time
import pytest
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(
os.path.join(cur_dir, 'unit', 'array', 'docker-compose.yml')
)
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'docarray_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def start_storage():
os.system(
f"docker-compose -f {compose_yml} --project-directory . up --build -d "
f"--remove-orphans"
)
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
yield
os.system(
f"docker-compose -f {compose_yml} --project-directory . down "
f"--remove-orphans"
)
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import ContributorDetails, SchemaField
class ReadCsvBlock(Block):
class Input(BlockSchema):
contents: str = SchemaField(
description="The contents of the CSV file to read",
placeholder="a, b, c\n1,2,3\n4,5,6",
)
delimiter: str = SchemaField(
description="The delimiter used in the CSV file",
default=",",
)
quotechar: str = SchemaField(
description="The character used to quote fields",
default='"',
)
escapechar: str = SchemaField(
description="The character used to escape the delimiter",
default="\\",
)
has_header: bool = SchemaField(
description="Whether the CSV file has a header row",
default=True,
)
skip_rows: int = SchemaField(
description="The number of rows to skip from the start of the file",
default=0,
)
strip: bool = SchemaField(
description="Whether to strip whitespace from the values",
default=True,
)
skip_columns: list[str] = SchemaField(
description="The columns to skip from the start of the row",
default_factory=list,
)
class Output(BlockSchema):
row: dict[str, str] = SchemaField(
description="The data produced from each row in the CSV file"
)
all_data: list[dict[str, str]] = SchemaField(
description="All the data in the CSV file as a list of rows"
)
def __init__(self):
super().__init__(
id="acf7625e-d2cb-4941-bfeb-2819fc6fc015",
input_schema=ReadCsvBlock.Input,
output_schema=ReadCsvBlock.Output,
description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
contributors=[ContributorDetails(name="Nicholas Tindle")],
categories={BlockCategory.TEXT, BlockCategory.DATA},
test_input={
"contents": "a, b, c\n1,2,3\n4,5,6",
},
test_output=[
("row", {"a": "1", "b": "2", "c": "3"}),
("row", {"a": "4", "b": "5", "c": "6"}),
(
"all_data",
[
{"a": "1", "b": "2", "c": "3"},
{"a": "4", "b": "5", "c": "6"},
],
),
],
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO
csv_file = StringIO(input_data.contents)
reader = csv.reader(
csv_file,
delimiter=input_data.delimiter,
quotechar=input_data.quotechar,
escapechar=input_data.escapechar,
)
header = None
if input_data.has_header:
header = next(reader)
if input_data.strip:
header = [h.strip() for h in header]
for _ in range(input_data.skip_rows):
next(reader)
def process_row(row):
data = {}
for i, value in enumerate(row):
if i not in input_data.skip_columns:
if input_data.has_header and header:
data[header[i]] = value.strip() if input_data.strip else value
else:
data[str(i)] = value.strip() if input_data.strip else value
return data
all_data = []
for row in reader:
processed_row = process_row(row)
all_data.append(processed_row)
yield "row", processed_row
yield "all_data", all_data
|
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import ContributorDetails, SchemaField
class ReadCsvBlock(Block):
class Input(BlockSchema):
contents: str = SchemaField(
description="The contents of the CSV file to read",
placeholder="a, b, c\n1,2,3\n4,5,6",
)
delimiter: str = SchemaField(
description="The delimiter used in the CSV file",
default=",",
)
quotechar: str = SchemaField(
description="The character used to quote fields",
default='"',
)
escapechar: str = SchemaField(
description="The character used to escape the delimiter",
default="\\",
)
has_header: bool = SchemaField(
description="Whether the CSV file has a header row",
default=True,
)
skip_rows: int = SchemaField(
description="The number of rows to skip from the start of the file",
default=0,
)
strip: bool = SchemaField(
description="Whether to strip whitespace from the values",
default=True,
)
skip_columns: list[str] = SchemaField(
description="The columns to skip from the start of the row",
default_factory=list,
)
class Output(BlockSchema):
row: dict[str, str] = SchemaField(
description="The data produced from each row in the CSV file"
)
all_data: list[dict[str, str]] = SchemaField(
description="All the data in the CSV file as a list of rows"
)
def __init__(self):
super().__init__(
id="acf7625e-d2cb-4941-bfeb-2819fc6fc015",
input_schema=ReadCsvBlock.Input,
output_schema=ReadCsvBlock.Output,
description="Reads a CSV file and outputs the data as a list of dictionaries and individual rows via rows.",
contributors=[ContributorDetails(name="Nicholas Tindle")],
categories={BlockCategory.TEXT, BlockCategory.DATA},
test_input={
"contents": "a, b, c\n1,2,3\n4,5,6",
},
test_output=[
("row", {"a": "1", "b": "2", "c": "3"}),
("row", {"a": "4", "b": "5", "c": "6"}),
(
"all_data",
[
{"a": "1", "b": "2", "c": "3"},
{"a": "4", "b": "5", "c": "6"},
],
),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
import csv
from io import StringIO
csv_file = StringIO(input_data.contents)
reader = csv.reader(
csv_file,
delimiter=input_data.delimiter,
quotechar=input_data.quotechar,
escapechar=input_data.escapechar,
)
header = None
if input_data.has_header:
header = next(reader)
if input_data.strip:
header = [h.strip() for h in header]
for _ in range(input_data.skip_rows):
next(reader)
def process_row(row):
data = {}
for i, value in enumerate(row):
if i not in input_data.skip_columns:
if input_data.has_header and header:
data[header[i]] = value.strip() if input_data.strip else value
else:
data[str(i)] = value.strip() if input_data.strip else value
return data
all_data = []
for row in reader:
processed_row = process_row(row)
all_data.append(processed_row)
yield "row", processed_row
yield "all_data", all_data
|
from datetime import datetime, timezone
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now().replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now().replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now().replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.15.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.8.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It compute the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned respectively.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrices that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from torch import nn, Tensor
from typing import Iterable, Dict
import torch.nn.functional as F
from enum import Enum
from ..SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
between anchor and negative. It compute the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
Margin is an important hyperparameter and needs to be tuned respectively.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
common distance metrices that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(TripletLoss, self).__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0))
]),
train_cfg=dict(rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1,
iou_calculator=dict(type='BboxOverlaps2D')),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.8),
min_bbox_size=0)))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
rpn_head=dict(
_delete_=True,
type='CascadeRPNHead',
num_stages=2,
stages=[
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[1.0],
strides=[4, 8, 16, 32, 64]),
adapt_cfg=dict(type='dilation', dilation=3),
bridged_feature=True,
sampling=False,
with_cls=False,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.1, 0.1, 0.5, 0.5)),
loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
dict(
type='StageCascadeRPNHead',
in_channels=256,
feat_channels=256,
adapt_cfg=dict(type='offset'),
bridged_feature=False,
sampling=True,
with_cls=True,
reg_decoded_bbox=True,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=(.0, .0, .0, .0),
target_stds=(0.05, 0.05, 0.1, 0.1)),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0))
]),
train_cfg=dict(rpn=[
dict(
assigner=dict(
type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.3,
ignore_iof_thr=-1,
iou_calculator=dict(type='BboxOverlaps2D')),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.8),
min_bbox_size=0)))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from ..builder import HEADS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(GARetinaHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_base_priors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_base_priors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import MaskedConv2d
from ..builder import HEADS
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
"""Guided-Anchor-based RetinaNet head."""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
if init_cfg is None:
init_cfg = dict(
type='Normal',
layer='Conv2d',
std=0.01,
override=[
dict(
type='Normal',
name='conv_loc',
std=0.01,
bias_prob=0.01),
dict(
type='Normal',
name='retina_cls',
std=0.01,
bias_prob=0.01)
])
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(GARetinaHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
1)
self.feature_adaption_cls = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.feature_adaption_reg = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deform_groups=self.deform_groups)
self.retina_cls = MaskedConv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = MaskedConv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def forward_single(self, x):
"""Forward feature map of a single scale level."""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
loc_pred = self.conv_loc(cls_feat)
shape_pred = self.conv_shape(reg_feat)
cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.retina_cls(cls_feat, mask)
bbox_pred = self.retina_reg(reg_feat, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.legacy.saving.serialization import (
serialize_keras_object as serialize_keras_object,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.saving.serialization import deserialize_keras_object
from keras.src.legacy.saving.serialization import serialize_keras_object
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate of the audio that the model is trained on.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
"""Construct the model and load the pretrained weight."""
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained *ConvTasNet* [:footcite:`Luo_2019`] pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototyoe.models.conv_tasnet_base`
and utilizes weights trained on *Libri2Mix dataset* [:footcite:`cosentino2020librimix`] using training script
``lightning_train.py`` `here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from torchaudio.prototype.models import conv_tasnet_base
@dataclass
class SourceSeparationBundle:
"""torchaudio.prototype.pipelines.SourceSeparationBundle()
Dataclass that bundles components for performing source separation.
Example
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX
>>> import torch
>>>
>>> # Build the separation model.
>>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
>>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
>>>
>>> # Instantiate the test set of Libri2Mix dataset.
>>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
>>>
>>> # Apply source separation on mixture audio.
>>> for i, data in enumerate(dataset):
>>> sample_rate, mixture, clean_sources = data
>>> # Make sure the shape of input suits the model requirement.
>>> mixture = mixture.reshape(1, 1, -1)
>>> estimated_sources = model(mixture)
>>> score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
>>> print(f"Si-SNR score is : {score}.)
>>> break
>>> Si-SNR score is : 16.24.
>>>
"""
_model_path: str
_model_factory_func: Callable[[], torch.nn.Module]
_sample_rate: int
@property
def sample_rate(self) -> int:
"""Sample rate (in cycles per second) of input waveforms.
:type: int
"""
return self._sample_rate
def get_model(self) -> torch.nn.Module:
model = self._model_factory_func()
path = torchaudio.utils.download_asset(self._model_path)
state_dict = torch.load(path)
model.load_state_dict(state_dict)
model.eval()
return model
CONVTASNET_BASE_LIBRI2MIX = SourceSeparationBundle(
_model_path="models/conv_tasnet_base_libri2mix.pt",
_model_factory_func=partial(conv_tasnet_base, num_sources=2),
_sample_rate=8000,
)
CONVTASNET_BASE_LIBRI2MIX.__doc__ = """Pre-trained ConvTasNet pipeline for source separation.
The underlying model is constructed by :py:func:`torchaudio.prototyoe.models.conv_tasnet_base`
and utilizes weights trained on Libri2Mix using training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/main/examples/source_separation/>`__ with default arguments.
Please refer to :py:class:`SourceSeparationBundle` for usage instructions.
"""
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
ReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, TopKActivation
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"TopKActivation",
# Losses
"CSRLoss",
"ReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add in models the possibility to have the MLM head(for splade)
# TODO : Check for every loss if compatible with the SparseEncoder but should have some minor modifications for the ones not using the utils similarity function
# TODO : Same for the evaluator
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
ReconstructionLoss,
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.models import CSRSparsity, TopKActivation
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"CSRSparsity",
"TopKActivation",
"CSRLoss",
"ReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseInformationRetrievalEvaluator",
"SparseEmbeddingSimilarityEvaluator",
]
# TODO : Complete the SparseEncoder class by finishing the overide of the functions
# TODO : Add tests for the SparseEncoder class
# TODO : Add in models the possibility to have the MLM head(for splade)
# TODO : Check for every loss if compatible with the SparseEncoder but should have some minor modifications for the ones not using the utils similarity function
# TODO : Same for the evaluator
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> List[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
import numpy as np
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> List[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.tensor(vectors, dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self.assertTrue(gradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
self.assertTrue(gradgradcheck(F.speed, (waveform, lengths, 1000, 1.1)))
|
import torch
import torchaudio.prototype.functional as F
from parameterized import parameterized
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin
class AutogradTestImpl(TestBaseMixin):
@nested_params(
[F.convolve, F.fftconvolve],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device, requires_grad=True)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device, requires_grad=True)
self.assertTrue(gradcheck(fn, (x, y, mode)))
self.assertTrue(gradgradcheck(fn, (x, y, mode)))
def test_add_noise(self):
leading_dims = (5, 2, 3)
L = 51
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self.assertTrue(gradcheck(F.add_noise, (waveform, noise, lengths, snr)))
self.assertTrue(gradgradcheck(F.add_noise, (waveform, noise, lengths, snr)))
@parameterized.expand(
[
(8000, (2, 3, 5, 7)),
(8000, (8000, 1)),
]
)
def test_oscillator_bank(self, sample_rate, shape):
# can be replaced with math.prod when we drop 3.7 support
def prod(iterable):
ret = 1
for item in iterable:
ret *= item
return ret
numel = prod(shape)
# use 1.9 instead of 2 so as to include values above nyquist frequency
fmax = sample_rate / 1.9
freq = torch.linspace(-fmax, fmax, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(
shape
)
amps = torch.linspace(-5, 5, numel, dtype=self.dtype, device=self.device, requires_grad=True).reshape(shape)
assert gradcheck(F.oscillator_bank, (freq, amps, sample_rate))
def test_extend_pitch(self):
num_frames, num_pitches = 5, 7
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype, requires_grad=True)
pattern = torch.linspace(1, num_pitches, num_pitches, device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.extend_pitch, (input, num_pitches))
assert gradcheck(F.extend_pitch, (input, pattern))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype, requires_grad=True)
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, False))
assert gradcheck(F.sinc_impulse_response, (cutoff, 513, True))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, SIoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'SIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p',
'carl_loss', 'AssociativeEmbeddingLoss', 'GaussianFocalLoss',
'QualityFocalLoss', 'DistributionFocalLoss', 'VarifocalLoss',
'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss', 'EQLV2Loss',
'MarginL2Loss', 'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .eqlv2_loss import EQLV2Loss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, EIoULoss, GIoULoss,
IoULoss, bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .l2_loss import L2Loss
from .margin_loss import MarginL2Loss
from .mse_loss import MSELoss, mse_loss
from .multipos_cross_entropy_loss import MultiPosCrossEntropyLoss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .triplet_loss import TripletLoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss',
'EIoULoss', 'GHMC', 'GHMR', 'reduce_loss', 'weight_reduce_loss',
'weighted_loss', 'L1Loss', 'l1_loss', 'isr_p', 'carl_loss',
'AssociativeEmbeddingLoss', 'GaussianFocalLoss', 'QualityFocalLoss',
'DistributionFocalLoss', 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss',
'SeesawLoss', 'DiceLoss', 'EQLV2Loss', 'MarginL2Loss',
'MultiPosCrossEntropyLoss', 'L2Loss', 'TripletLoss'
]
|
_base_ = './cascade-rcnn_r50_fpn_1x_coco.py'
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
type='CascadeRCNN',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
# TODO support proposals input
data = dict(
train=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optim_wrapper = dict(clip_grad=dict(max_norm=35, norm_type=2))
|
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
roi_head=dict(
bbox_head=dict(
bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rcnn=dict(
assigner=dict(
pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
sampler=dict(num=256))),
test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadProposals', num_max_proposals=300),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['proposals']),
dict(
type='ToDataContainer',
fields=[dict(key='proposals', stack=False)]),
dict(type='Collect', keys=['img', 'proposals']),
])
]
data = dict(
train=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
pipeline=train_pipeline),
val=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline),
test=dict(
proposal_file=data_root +
'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
pipeline=test_pipeline))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
|
from torchaudio.models.rnnt import emformer_rnnt_model
# https://pytorch.org/audio/master/_modules/torchaudio/models/rnnt.html#emformer_rnnt_base
def emformer_rnnt():
return emformer_rnnt_model(
input_dim=512,
encoding_dim=1024,
num_symbols=1024,
segment_length=64,
right_context_length=0,
time_reduction_input_dim=128,
time_reduction_stride=1,
transformer_num_heads=4,
transformer_ffn_dim=2048,
transformer_num_layers=20,
transformer_dropout=0.1,
transformer_activation="gelu",
transformer_left_context_length=30,
transformer_max_memory_size=0,
transformer_weight_init_scale_strategy="depthwise",
transformer_tanh_on_mem=True,
symbol_embedding_dim=512,
num_lstm_layers=3,
lstm_layer_norm=True,
lstm_layer_norm_epsilon=1e-3,
lstm_dropout=0.3,
)
|
from torchaudio.models.rnnt import emformer_rnnt_model
# https://pytorch.org/audio/master/_modules/torchaudio/models/rnnt.html#emformer_rnnt_base
def emformer_rnnt():
return emformer_rnnt_model(
input_dim=512,
encoding_dim=1024,
num_symbols=1024,
segment_length=64,
right_context_length=0,
time_reduction_input_dim=128,
time_reduction_stride=1,
transformer_num_heads=4,
transformer_ffn_dim=2048,
transformer_num_layers=20,
transformer_dropout=0.1,
transformer_activation="gelu",
transformer_left_context_length=30,
transformer_max_memory_size=0,
transformer_weight_init_scale_strategy="depthwise",
transformer_tanh_on_mem=True,
symbol_embedding_dim=512,
num_lstm_layers=3,
lstm_layer_norm=True,
lstm_layer_norm_epsilon=1e-3,
lstm_dropout=0.3,
)
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
msg = f"Got unexpected transformer type: {_transformer}"
raise ValueError(msg)
return documents
|
from collections.abc import Sequence
from inspect import signature
from typing import Optional, Union
from langchain_core.callbacks import Callbacks
from langchain_core.documents import (
BaseDocumentCompressor,
BaseDocumentTransformer,
Document,
)
from pydantic import ConfigDict
class DocumentCompressorPipeline(BaseDocumentCompressor):
"""Document compressor that uses a pipeline of Transformers."""
transformers: list[Union[BaseDocumentTransformer, BaseDocumentCompressor]]
"""List of document filters that are chained together and run in sequence."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Transform a list of documents."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.compress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = _transformer.compress_documents(
documents, query, callbacks=callbacks
)
else:
documents = _transformer.compress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = _transformer.transform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context."""
for _transformer in self.transformers:
if isinstance(_transformer, BaseDocumentCompressor):
accepts_callbacks = (
signature(_transformer.acompress_documents).parameters.get(
"callbacks"
)
is not None
)
if accepts_callbacks:
documents = await _transformer.acompress_documents(
documents, query, callbacks=callbacks
)
else:
documents = await _transformer.acompress_documents(documents, query)
elif isinstance(_transformer, BaseDocumentTransformer):
documents = await _transformer.atransform_documents(documents)
else:
raise ValueError(f"Got unexpected transformer type: {_transformer}")
return documents
|
import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index.selectors.notdiamond.base import NotDiamondSelector, LLMSingleSelector
from notdiamond import LLMConfig
@pytest.fixture()
def session_id() -> str:
return str(uuid.uuid4())
@pytest.fixture()
def choices() -> List[ToolMetadata]:
return [
ToolMetadata(
name="vector_index", description="Great for asking questions about recipes."
),
ToolMetadata(name="list_index", description="Great for summarizing recipes."),
]
@pytest.fixture()
def nd_selector(session_id):
from notdiamond import NotDiamond
os.environ["OPENAI_API_KEY"] = "test"
os.environ["ANTHROPIC_API_KEY"] = "test"
llm_configs = [
LLMConfig(provider="openai", model="gpt-4o"),
]
# mocking out model_select calls on client
_client = MagicMock(stub=NotDiamond, api_key="test", llm_configs=llm_configs)
_client.model_select.return_value = (session_id, llm_configs[0])
async def aselect(*args, **kwargs):
return (session_id, llm_configs[0])
_client.amodel_select = aselect
selector = NotDiamondSelector(client=_client)
# monkeypatch the _select and _aselect methods on parent class of NDSelector
LLMSingleSelector._select = MagicMock(
return_value=SelectorResult(
selections=[SingleSelection(index=0, reason="test")]
)
)
LLMSingleSelector._aselect = AsyncMock(
return_value=SelectorResult(
selections=[SingleSelection(index=1, reason="test")]
)
)
return selector
class TestNotDiamondSelector:
@patch("llama_index.llms.openai.OpenAI")
def test_select(self, openai_mock, nd_selector, choices, session_id):
"""_select should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "Please describe the llama_index framework in 280 characters or less."
result = nd_selector._select(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 0
assert openai_mock.is_called
@pytest.mark.asyncio()
@patch("llama_index.llms.openai.OpenAI")
async def test_aselect(self, openai_mock, nd_selector, choices, session_id):
"""_aselect should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "How can I cook a vegan variant of deviled eggs?"
result = await nd_selector._aselect(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 1
assert openai_mock.is_called
|
import os
import pytest
from typing import List
from unittest.mock import MagicMock, patch, AsyncMock
import uuid
from llama_index.core.base.base_selector import (
SelectorResult,
SingleSelection,
)
from llama_index.core.schema import QueryBundle
from llama_index.core.tools import ToolMetadata
from llama_index.selectors.notdiamond.base import NotDiamondSelector, LLMSingleSelector
from notdiamond import LLMConfig
@pytest.fixture()
def session_id() -> str:
return str(uuid.uuid4())
@pytest.fixture()
def choices() -> List[ToolMetadata]:
return [
ToolMetadata(
name="vector_index", description="Great for asking questions about recipes."
),
ToolMetadata(name="list_index", description="Great for summarizing recipes."),
]
@pytest.fixture()
def nd_selector(session_id):
from notdiamond import NotDiamond
os.environ["OPENAI_API_KEY"] = "test"
os.environ["ANTHROPIC_API_KEY"] = "test"
llm_configs = [
LLMConfig(provider="openai", model="gpt-4o"),
LLMConfig(provider="anthropic", model="claude-3-opus-20240229"),
]
# mocking out model_select calls on client
_client = MagicMock(stub=NotDiamond, api_key="test", llm_configs=llm_configs)
_client.model_select.return_value = (session_id, llm_configs[0])
async def aselect(*args, **kwargs):
return (session_id, llm_configs[0])
_client.amodel_select = aselect
selector = NotDiamondSelector(client=_client)
# monkeypatch the _select and _aselect methods on parent class of NDSelector
LLMSingleSelector._select = MagicMock(
return_value=SelectorResult(
selections=[SingleSelection(index=0, reason="test")]
)
)
LLMSingleSelector._aselect = AsyncMock(
return_value=SelectorResult(
selections=[SingleSelection(index=1, reason="test")]
)
)
return selector
class TestNotDiamondSelector:
@patch("llama_index.llms.openai.OpenAI")
def test_select(self, openai_mock, nd_selector, choices, session_id):
"""_select should call openai, as mocked."""
openai_mock.return_value = MagicMock()
openai_mock.return_value.chat.return_value.message.content = "vector_index"
query = "Please describe the llama_index framework in 280 characters or less."
result = nd_selector._select(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "openai/gpt-4o"
assert result.selections[0].index == 0
assert openai_mock.is_called
@pytest.mark.asyncio()
@patch("llama_index.llms.anthropic.Anthropic")
async def test_aselect(self, anthropic_mock, nd_selector, choices, session_id):
"""_aselect should call anthropic, as mocked."""
anthropic_mock.return_value = MagicMock()
anthropic_mock.return_value.chat.return_value.message.content = "vector_index"
query = "How can I cook a vegan variant of deviled eggs?"
result = await nd_selector._aselect(choices, QueryBundle(query_str=query))
assert result.session_id == session_id
assert str(result.llm) == "anthropic/claude-3-opus-20240229"
assert result.selections[0].index == 1
assert anthropic_mock.is_called
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
from ._dataset_wrapper import wrap_dataset_for_transforms_v2 # type: ignore[attr-defined] # usort: skip
|
from ._bounding_box import BoundingBox, BoundingBoxFormat
from ._datapoint import FillType, FillTypeJIT, InputType, InputTypeJIT
from ._image import Image, ImageType, ImageTypeJIT, TensorImageType, TensorImageTypeJIT
from ._label import Label, OneHotLabel
from ._mask import Mask
from ._video import TensorVideoType, TensorVideoTypeJIT, Video, VideoType, VideoTypeJIT
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook'
]
|
from .checkloss_hook import CheckInvalidLossHook
from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook
from .sync_norm_hook import SyncNormHook
from .sync_random_size_hook import SyncRandomSizeHook
from .yolox_lrupdater_hook import YOLOXLrUpdaterHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',
'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',
'CheckInvalidLossHook'
]
|
"""
This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file.
TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, datasets, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected, that the there is one sentence per line in that text file.
TSDAE will be training using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_tsdae_from_file.py path/to/sentences.txt
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
from sentence_transformers import models, datasets, losses
import logging
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Train Parameters
model_name = "bert-base-uncased"
batch_size = 8
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_tsdae{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name)
# Apply **cls** pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), "cls")
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train and evaluate the model (it needs about 1 hour for one epoch of AskUbuntu) #################
# We wrap our training sentences in the DenoisingAutoEncoderDataset to add deletion noise on the fly
train_dataset = datasets.DenoisingAutoEncoderDataset(train_sentences)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.DenoisingAutoEncoderLoss(model, decoder_name_or_path=model_name, tie_encoder_decoder=True)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=1,
weight_decay=0,
scheduler="constantlr",
optimizer_params={"lr": 3e-5},
show_progress_bar=True,
checkpoint_path=model_output_path,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from mmengine.registry import init_default_scope
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_track_inputs, get_detector_cfg
class TestDeepSORT(TestCase):
@classmethod
def setUpClass(cls):
init_default_scope('mmdet')
@parameterized.expand([
'strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman'
'-mot17halftrain_test-mot17halfval.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.detector.neck.out_channels = 1
model.detector.neck.num_csp_blocks = 1
model.detector.bbox_head.in_channels = 1
model.detector.bbox_head.feat_channels = 1
model.reid.backbone.depth = 18
model.reid.head.fc_channels = 1
model.reid.head.out_channels = 1
model.reid.head.num_classes = 2
model.cmc = None
model = MODELS.build(model)
assert model.detector
assert model.reid
assert model.tracker
@parameterized.expand([
('strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman'
'-mot17halftrain_test-mot17halfval.py', ('cpu', 'cuda')),
])
def test_strongsort_forward_predict_mode(self, cfg_file, devices):
message_hub = MessageHub.get_instance(
f'test_strongsort_forward_predict_mode-{time.time()}')
message_hub.update_info('iter', 0)
message_hub.update_info('epoch', 0)
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
_model = get_detector_cfg(cfg_file)
_model.detector.neck.out_channels = 1
_model.detector.neck.num_csp_blocks = 1
_model.detector.bbox_head.in_channels = 1
_model.detector.bbox_head.feat_channels = 1
_model.reid.backbone.depth = 18
_model.reid.head.in_channels = 512
_model.reid.head.fc_channels = 1
_model.reid.head.out_channels = 1
_model.reid.head.num_classes = 2
_model.cmc = None
model = MODELS.build(_model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
model = model.cuda()
packed_inputs = demo_track_inputs(
batch_size=1,
num_frames=2,
image_shapes=[(3, 256, 256)],
num_classes=1)
out_data = model.data_preprocessor(packed_inputs, False)
# Test forward test
model.eval()
with torch.no_grad():
batch_results = model.forward(**out_data, mode='predict')
assert len(batch_results) == 1
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmengine.logging import MessageHub
from mmengine.registry import init_default_scope
from parameterized import parameterized
from mmdet.registry import MODELS
from mmdet.testing import demo_track_inputs, get_detector_cfg
class TestDeepSORT(TestCase):
@classmethod
def setUpClass(cls):
init_default_scope('mmdet')
@parameterized.expand([
'strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman'
'-mot17halftrain_test-mot17halfval.py'
])
def test_init(self, cfg_file):
model = get_detector_cfg(cfg_file)
model.detector.neck.out_channels = 1
model.detector.neck.num_csp_blocks = 1
model.detector.bbox_head.in_channels = 1
model.detector.bbox_head.feat_channels = 1
model.reid.backbone.depth = 18
model.reid.head.fc_channels = 1
model.reid.head.out_channels = 1
model.reid.head.num_classes = 2
model = MODELS.build(model)
assert model.detector
assert model.reid
assert model.cmc
assert model.tracker
@parameterized.expand([
('strongsort/strongsort_yolox_x_8xb4-80e_crowdhuman'
'-mot17halftrain_test-mot17halfval.py', ('cpu', 'cuda')),
])
def test_strongsort_forward_predict_mode(self, cfg_file, devices):
message_hub = MessageHub.get_instance(
f'test_strongsort_forward_predict_mode-{time.time()}')
message_hub.update_info('iter', 0)
message_hub.update_info('epoch', 0)
assert all([device in ['cpu', 'cuda'] for device in devices])
for device in devices:
_model = get_detector_cfg(cfg_file)
_model.detector.neck.out_channels = 1
_model.detector.neck.num_csp_blocks = 1
_model.detector.bbox_head.in_channels = 1
_model.detector.bbox_head.feat_channels = 1
_model.reid.backbone.depth = 18
_model.reid.head.in_channels = 512
_model.reid.head.fc_channels = 1
_model.reid.head.out_channels = 1
_model.reid.head.num_classes = 2
model = MODELS.build(_model)
if device == 'cuda':
if not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
model = model.cuda()
packed_inputs = demo_track_inputs(
batch_size=1,
num_frames=2,
image_shapes=[(3, 256, 256)],
num_classes=1)
out_data = model.data_preprocessor(packed_inputs, False)
# Test forward test
model.eval()
with torch.no_grad():
batch_results = model.forward(**out_data, mode='predict')
assert len(batch_results) == 1
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~SentenceTransformerTrainingArguments` which itself extend
:class:`~transformers.TrainingArguments` with additional arguments specific to Sentence Transformers.
See :class:`~transformers.TrainingArguments` for the complete list of available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from __future__ import annotations
from dataclasses import dataclass
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
@dataclass
class SparseEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
SparseEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .sparse_rcnn import SparseRCNN
@MODELS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .sparse_rcnn import SparseRCNN
@MODELS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
|
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor
import copy
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDoc):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
def test_torchtensor_deepcopy():
# Setup
original_tensor_float = TorchTensor(torch.rand(10))
original_tensor_int = TorchTensor(torch.randint(0, 100, (10,)))
# Exercise
copied_tensor_float = copy.deepcopy(original_tensor_float)
copied_tensor_int = copy.deepcopy(original_tensor_int)
# Verify
assert torch.equal(original_tensor_float, copied_tensor_float)
assert original_tensor_float is not copied_tensor_float
assert torch.equal(original_tensor_int, copied_tensor_int)
assert original_tensor_int is not copied_tensor_int
|
import torch
from docarray import BaseDoc
from docarray.typing import TorchEmbedding, TorchTensor
def test_set_torch_tensor():
class MyDocument(BaseDoc):
tensor: TorchTensor
d = MyDocument(tensor=torch.zeros((3, 224, 224)))
assert isinstance(d.tensor, TorchTensor)
assert isinstance(d.tensor, torch.Tensor)
assert (d.tensor == torch.zeros((3, 224, 224))).all()
def test_set_torch_embedding():
class MyDocument(BaseDoc):
embedding: TorchEmbedding
d = MyDocument(embedding=torch.zeros((128,)))
assert isinstance(d.embedding, TorchTensor)
assert isinstance(d.embedding, TorchEmbedding)
assert isinstance(d.embedding, torch.Tensor)
assert (d.embedding == torch.zeros((128,))).all()
|
# data settings
dataset_type = 'CocoCaptionDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
test_pipeline = [
dict(
type='LoadImageFromFile',
imdecode_backend='pillow',
backend_args=backend_args),
dict(
type='Resize',
scale=(224, 224),
interpolation='bicubic',
backend='pillow'),
dict(type='PackInputs', meta_keys=['image_id']),
]
# ann_file download from
# train dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json # noqa
# val dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json # noqa
# test dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json # noqa
# val evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json # noqa
# test evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json # noqa
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/coco_karpathy_val.json',
pipeline=test_pipeline,
))
val_evaluator = dict(
type='COCOCaptionMetric',
ann_file=data_root + 'annotations/coco_karpathy_val_gt.json',
)
# # If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
|
# data settings
dataset_type = 'COCOCaptionDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
test_pipeline = [
dict(
type='LoadImageFromFile',
imdecode_backend='pillow',
backend_args=backend_args),
dict(
type='Resize',
scale=(224, 224),
interpolation='bicubic',
backend='pillow'),
dict(type='PackInputs', meta_keys=['image_id']),
]
# ann_file download from
# train dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_train.json # noqa
# val dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val.json # noqa
# test dataset: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test.json # noqa
# val evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json # noqa
# test evaluator: https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json # noqa
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/coco_karpathy_val.json',
pipeline=test_pipeline,
))
val_evaluator = dict(
type='COCOCaptionMetric',
ann_file=data_root + 'annotations/coco_karpathy_val_gt.json',
)
# # If you want standard test, please manually configure the test dataset
test_dataloader = val_dataloader
test_evaluator = val_evaluator
|
"""Pydantic v1 compatibility shim."""
from importlib import metadata
from pydantic.v1 import * # noqa: F403
from langchain_core._api.deprecation import warn_deprecated
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from importlib import metadata
from langchain_core._api.deprecation import warn_deprecated
# Create namespaces for pydantic v1 and v2.
# This code must stay at the top of the file before other modules may
# attempt to import pydantic since it adds pydantic_v1 and pydantic_v2 to sys.modules.
#
# This hack is done for the following reasons:
# * Langchain will attempt to remain compatible with both pydantic v1 and v2 since
# both dependencies and dependents may be stuck on either version of v1 or v2.
# * Creating namespaces for pydantic v1 and v2 should allow us to write code that
# unambiguously uses either v1 or v2 API.
# * This change is easier to roll out and roll back.
try:
from pydantic.v1 import * # noqa: F403
except ImportError:
from pydantic import * # type: ignore[assignment,no-redef] # noqa: F403
try:
_PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
except metadata.PackageNotFoundError:
_PYDANTIC_MAJOR_VERSION = 0
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
from torchaudio._internal.module_utils import dropping_support
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Preemphasis,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpecAugment,
SpectralCentroid,
Spectrogram,
Speed,
SpeedPerturbation,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
RNNTLoss.__init__ = dropping_support(RNNTLoss.__init__)
__all__ = [
"AddNoise",
"AmplitudeToDB",
"ComputeDeltas",
"Convolve",
"Deemphasis",
"Fade",
"FFTConvolve",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"Preemphasis",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpecAugment",
"SpectralCentroid",
"Spectrogram",
"Speed",
"SpeedPerturbation",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AddNoise,
AmplitudeToDB,
ComputeDeltas,
Convolve,
Deemphasis,
Fade,
FFTConvolve,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Preemphasis,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpecAugment,
SpectralCentroid,
Spectrogram,
Speed,
SpeedPerturbation,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AddNoise",
"AmplitudeToDB",
"ComputeDeltas",
"Convolve",
"Deemphasis",
"Fade",
"FFTConvolve",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"Preemphasis",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpecAugment",
"SpectralCentroid",
"Spectrogram",
"Speed",
"SpeedPerturbation",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
try:
import torch_npu # noqa: F401
# Enable operator support for dynamic shape and
# binary operator support on the NPU.
torch.npu.set_compile_mode(jit_compile=False)
except Exception:
return False
return hasattr(torch, 'npu') and torch.npu.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
It's specialized for mac m1 chips and require torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
if is_npu_available():
return 'npu'
elif is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
elif is_mps_available():
return 'mps'
else:
return 'cpu'
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
def get_max_cuda_memory(device: Optional[torch.device] = None) -> int:
"""Returns the maximum GPU memory occupied by tensors in megabytes (MB) for
a given device. By default, this returns the peak allocated memory since
the beginning of this program.
Args:
device (torch.device, optional): selected device. Returns
statistic for the current device, given by
:func:`~torch.cuda.current_device`, if ``device`` is None.
Defaults to None.
Returns:
int: The maximum GPU memory occupied by tensors in megabytes
for a given device.
"""
mem = torch.cuda.max_memory_allocated(device=device)
mem_mb = torch.tensor([int(mem) // (1024 * 1024)],
dtype=torch.int,
device=device)
torch.cuda.reset_peak_memory_stats()
return int(mem_mb.item())
def is_cuda_available() -> bool:
"""Returns True if cuda devices exist."""
return torch.cuda.is_available()
def is_npu_available() -> bool:
"""Returns True if Ascend PyTorch and npu devices exist."""
try:
import torch_npu # noqa: F401
except Exception:
return False
return hasattr(torch, 'npu') and torch.npu.is_available()
def is_mlu_available() -> bool:
"""Returns True if Cambricon PyTorch and mlu devices exist."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def is_mps_available() -> bool:
"""Return True if mps devices exist.
It's specialized for mac m1 chips and require torch version 1.12 or higher.
"""
return hasattr(torch.backends, 'mps') and torch.backends.mps.is_available()
def get_device() -> str:
"""Returns the currently existing device type.
Returns:
str: cuda | npu | mlu | mps | cpu.
"""
if is_npu_available():
return 'npu'
elif is_cuda_available():
return 'cuda'
elif is_mlu_available():
return 'mlu'
elif is_mps_available():
return 'mps'
else:
return 'cpu'
|
_base_ = './reppoints-moment_r50_fpn-gn_head-gn_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
|
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, stack_batch, unmap)
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptSampleList,
SampleList, SamplingResultList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'stack_batch',
'levels_to_images', 'ConfigType', 'OptConfigType', 'MultiConfig',
'OptMultiConfig', 'InstanceList', 'OptInstanceList', 'SampleList',
'OptSampleList', 'SamplingResultList'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean, sync_random_seed)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, levels_to_images, mask2ndarray,
multi_apply, select_single_mlvl, stack_batch, unmap)
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptSampleList,
SampleList)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk', 'sync_random_seed', 'stack_batch',
'levels_to_images', 'ConfigType', 'OptConfigType', 'MultiConfig',
'OptMultiConfig', 'InstanceList', 'OptInstanceList', 'SampleList',
'OptSampleList'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function,
get_object_from_string, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'deprecated_function',
'apply_to', 'get_object_from_string'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (apply_to, check_prerequisites, concat_list,
deprecated_api_warning, deprecated_function, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'deprecated_function',
'apply_to'
]
|
# In[1]:
import pandas as pd
# In[2]:
# from https://github.com/pytorch/audio/blob/main/.github/process_commit.py
primary_labels_mapping = {
"BC-breaking": "Backward-incompatible changes",
"deprecation": "Deprecations",
"bug fix": "Bug Fixes",
"new feature": "New Features",
"improvement": "Improvements",
"prototype": "Prototypes",
"other": "Other",
"None": "Missing",
}
secondary_labels_mapping = {
"module: io": "I/O",
"module: ops": "Ops",
"module: models": "Models",
"module: pipelines": "Pipelines",
"module: datasets": "Datasets",
"module: docs": "Documentation",
"module: tests": "Tests",
"tutorial": "Tutorials",
"recipe": "Recipes",
"example": "Examples",
"build": "Build",
"style": "Style",
"perf": "Performance",
"other": "Other",
"None": "Missing",
}
# In[3]:
df = pd.read_json("data.json").T
df.tail()
# In[4]:
def get_labels(col_name, labels):
df[col_name] = [[] for _ in range(len(df))]
for _, row in df.iterrows():
row[col_name] = "None"
for label in labels:
if label in row["labels"]:
row[col_name] = label
break
# In[5]:
get_labels("primary_label", primary_labels_mapping.keys())
get_labels("secondary_label", secondary_labels_mapping.keys())
df.tail(5)
# In[6]:
for primary_label in primary_labels_mapping.keys():
primary_df = df[df["primary_label"] == primary_label]
if primary_df.empty:
continue
print(f"## {primary_labels_mapping[primary_label]}")
for secondary_label in secondary_labels_mapping.keys():
secondary_df = primary_df[primary_df["secondary_label"] == secondary_label]
if secondary_df.empty:
continue
print(f"### {secondary_labels_mapping[secondary_label]}")
for _, row in secondary_df.iterrows():
print(f"- {row['title']}")
print()
print()
|
# In[1]:
import pandas as pd
# In[2]:
# from https://github.com/pytorch/audio/blob/main/.github/process_commit.py
primary_labels_mapping = {
"BC-breaking": "Backward-incompatible changes",
"deprecation": "Deprecations",
"bug fix": "Bug Fixes",
"new feature": "New Features",
"improvement": "Improvements",
"example": "Examples",
"prototype": "Prototypes",
"other": "Other",
"None": "Missing",
}
secondary_labels_mapping = {
"module: I/O": "I/O",
"module: ops": "Ops",
"module: models": "Models",
"module: pipelines": "Pipelines",
"module: datasets": "Datasets",
"module: docs": "Documentation",
"module: tests": "Tests",
"build": "Build",
"style": "Style",
"perf": "Performance",
"other": "Other",
"None": "Missing",
}
# In[3]:
df = pd.read_json("data.json").T
df.tail()
# In[4]:
def get_labels(col_name, labels):
df[col_name] = [[] for _ in range(len(df))]
for _, row in df.iterrows():
row[col_name] = "None"
for label in labels:
if label in row["labels"]:
row[col_name] = label
break
# In[5]:
get_labels("primary_label", primary_labels_mapping.keys())
get_labels("secondary_label", secondary_labels_mapping.keys())
df.tail(5)
# In[6]:
for primary_label in primary_labels_mapping.keys():
primary_df = df[df["primary_label"] == primary_label]
if primary_df.empty:
continue
print(f"## {primary_labels_mapping[primary_label]}")
for secondary_label in secondary_labels_mapping.keys():
secondary_df = primary_df[primary_df["secondary_label"] == secondary_label]
if secondary_df.empty:
continue
print(f"### {secondary_labels_mapping[secondary_label]}")
for _, row in secondary_df.iterrows():
print(f"- {row['title']}")
print()
print()
|
from __future__ import annotations
from collections.abc import Iterable
import torch.nn as nn
from torch import Tensor
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
SparseCosineSimilarityLoss expects that the InputExamples consists of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
default, the identify function is used (i.e. no change).
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCosineSimilarityLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCosineSimilarityLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
return super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal, TypeAlias
import numpy as np
Device: TypeAlias = Literal["cpu"]
if TYPE_CHECKING:
# NumPy 1.x on Python 3.10 fails to parse np.dtype[]
DType: TypeAlias = np.dtype[
np.bool_
| np.integer[Any]
| np.float32
| np.float64
| np.complex64
| np.complex128
]
Array: TypeAlias = np.ndarray[Any, DType]
else:
DType: TypeAlias = np.dtype
Array: TypeAlias = np.ndarray
__all__ = ["Array", "DType", "Device"]
_all_ignore = ["np"]
def __dir__() -> list[str]:
return __all__
|
from __future__ import annotations
__all__ = [
"ndarray",
"Device",
"Dtype",
]
import sys
from typing import (
Literal,
Union,
TYPE_CHECKING,
)
from numpy import (
ndarray,
dtype,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
)
Device = Literal["cpu"]
if TYPE_CHECKING or sys.version_info >= (3, 9):
Dtype = dtype[Union[
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
]]
else:
Dtype = dtype
|
# mypy: enable-error-code=unused-ignore
from typing_extensions import assert_type, Never
from torch import Size
class ZeroIndex:
def __index__(self) -> int:
return 0
tup0: tuple[()] = ()
tup1: tuple[int] = (1,)
tup2: tuple[int, int] = (1, 2)
tupN: tuple[int, int, int] = (1, 2, 3)
tupX: tuple[Never, ...] = tuple()
s = Size([1, 2, 3])
# assignability to tuple
t: tuple[int, ...] = s
# __getitem__
assert_type(s[0], int)
assert_type(s[ZeroIndex()], int)
assert_type(s[:2], Size)
# __add__
assert_type(s + s, Size)
assert_type(s + tup0, Size)
assert_type(s + tup1, Size)
assert_type(s + tup2, Size)
assert_type(s + tupN, Size)
assert_type(s + tupX, Size)
# __radd__
# NOTE: currently incorrect inference, see: https://github.com/python/mypy/issues/19006
assert_type(tup0 + s, Size) # type: ignore[assert-type]
assert_type(tup1 + s, Size) # type: ignore[assert-type]
assert_type(tup2 + s, Size) # type: ignore[assert-type]
assert_type(tupN + s, Size) # type: ignore[assert-type]
assert_type(tupX + s, Size) # type: ignore[assert-type]
# __mul__
assert_type(s * 3, Size)
assert_type(s * ZeroIndex(), Size)
assert_type(3 * s, Size)
assert_type(ZeroIndex() * s, Size)
|
from typing_extensions import assert_type
from torch import Size
s1 = Size([1, 2, 3])
s2 = Size([1, 2, 3])
class ZeroIndex:
def __index__(self) -> int:
return 0
# __getitem__
assert_type(s1[0], int)
assert_type(s1[ZeroIndex()], int)
assert_type(s1[:2], Size)
# __add__
assert_type(s1 + s2, Size)
assert_type(s1 + (1, 2), Size)
# Size has no __radd__, so tuple.__add__(right, left) is called
assert_type((1, 2) + s1, tuple[int, ...])
# __mul__
assert_type(s1 * 3, Size)
assert_type(s1 * ZeroIndex(), Size)
assert_type(3 * s1, Size)
assert_type(ZeroIndex() * s1, Size)
|
"""Google Search tool spec."""
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = ["google_search"]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
num (int, optional): The number of search results to return. Defaults to None.
Raises:
ValueError: If the 'num' is not an integer between 1 and 10.
"""
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
response = requests.get(url)
return [Document(text=response.text)]
|
"""Google Search tool spec."""
import urllib.parse
from typing import Optional
import requests
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
QUERY_URL_TMPL = (
"https://www.googleapis.com/customsearch/v1?key={key}&cx={engine}&q={query}"
)
class GoogleSearchToolSpec(BaseToolSpec):
"""Google Search tool spec."""
spec_functions = ["google_search"]
def __init__(self, key: str, engine: str, num: Optional[int] = None) -> None:
"""Initialize with parameters."""
self.key = key
self.engine = engine
self.num = num
def google_search(self, query: str):
"""
Make a query to the Google search engine to receive a list of results.
Args:
query (str): The query to be passed to Google search.
num (int, optional): The number of search results to return. Defaults to None.
Raises:
ValueError: If the 'num' is not an integer between 1 and 10.
"""
url = QUERY_URL_TMPL.format(
key=self.key, engine=self.engine, query=urllib.parse.quote_plus(query)
)
if self.num is not None:
if not 1 <= self.num <= 10:
raise ValueError("num should be an integer between 1 and 10, inclusive")
url += f"&num={self.num}"
response = requests.get(url)
return [Document(text=response.text)]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.model import is_model_wrapper
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmengine.runner.checkpoint import load_checkpoint
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn, visualization, and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMTrack test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--detector', help='detection checkpoint file')
parser.add_argument('--reid', help='reid checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmtrack into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
if is_model_wrapper(runner.model):
model = runner.model.module
else:
model = runner.model
if args.detector:
assert not (args.checkpoint and args.detector), \
'Error: checkpoint and detector checkpoint cannot both exist'
load_checkpoint(model.detector, args.detector)
if args.reid:
assert not (args.checkpoint and args.reid), \
'Error: checkpoint and reid checkpoint cannot both exist'
load_checkpoint(model.reid, args.reid)
# start testing
runner.test()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.model import is_model_wrapper
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmengine.runner.checkpoint import load_checkpoint
from mmdet.utils import register_all_modules
# TODO: support fuse_conv_bn, visualization, and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMTrack test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--detector', help='detection checkpoint file')
parser.add_argument('--reid', help='reid checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmtrack into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
if is_model_wrapper(runner.model):
model = runner.model.module
else:
model = runner.model
if args.detector:
assert not (args.checkpoint and args.detector), \
'Error: checkpoint and detector checkpoint cannot both exist'
load_checkpoint(model.detector, args.detector)
if args.reid:
assert not (args.checkpoint and args.reid), \
'Error: checkpoint and reid checkpoint cannot both exist'
load_checkpoint(model.reid, args.reid)
# start testing
runner.test()
if __name__ == '__main__':
main()
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = (
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
)
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""LangChain **Runnable** and the **LangChain Expression Language (LCEL)**.
The LangChain Expression Language (LCEL) offers a declarative method to build
production-grade programs that harness the power of LLMs.
Programs created using LCEL and LangChain Runnables inherently support
synchronous, asynchronous, batch, and streaming operations.
Support for **async** allows servers hosting LCEL based programs to scale better
for higher concurrent loads.
**Batch** operations allow for processing multiple inputs in parallel.
**Streaming** of intermediate outputs, as they're being generated, allows for
creating more responsive UX.
This module contains schema and implementation of LangChain Runnables primitives.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.runnables.base import (
Runnable,
RunnableBinding,
RunnableGenerator,
RunnableLambda,
RunnableMap,
RunnableParallel,
RunnableSequence,
RunnableSerializable,
chain,
)
from langchain_core.runnables.branch import RunnableBranch
from langchain_core.runnables.config import (
RunnableConfig,
ensure_config,
get_config_list,
patch_config,
run_in_executor,
)
from langchain_core.runnables.fallbacks import RunnableWithFallbacks
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables.passthrough import (
RunnableAssign,
RunnablePassthrough,
RunnablePick,
)
from langchain_core.runnables.router import RouterInput, RouterRunnable
from langchain_core.runnables.utils import (
AddableDict,
ConfigurableField,
ConfigurableFieldMultiOption,
ConfigurableFieldSingleOption,
ConfigurableFieldSpec,
aadd,
add,
)
__all__ = [
"chain",
"AddableDict",
"ConfigurableField",
"ConfigurableFieldSingleOption",
"ConfigurableFieldMultiOption",
"ConfigurableFieldSpec",
"ensure_config",
"run_in_executor",
"patch_config",
"RouterInput",
"RouterRunnable",
"Runnable",
"RunnableSerializable",
"RunnableBinding",
"RunnableBranch",
"RunnableConfig",
"RunnableGenerator",
"RunnableLambda",
"RunnableMap",
"RunnableParallel",
"RunnablePassthrough",
"RunnableAssign",
"RunnablePick",
"RunnableSequence",
"RunnableWithFallbacks",
"RunnableWithMessageHistory",
"get_config_list",
"aadd",
"add",
]
_dynamic_imports = {
"chain": "base",
"Runnable": "base",
"RunnableBinding": "base",
"RunnableGenerator": "base",
"RunnableLambda": "base",
"RunnableMap": "base",
"RunnableParallel": "base",
"RunnableSequence": "base",
"RunnableSerializable": "base",
"RunnableBranch": "branch",
"RunnableConfig": "config",
"ensure_config": "config",
"get_config_list": "config",
"patch_config": "config",
"run_in_executor": "config",
"RunnableWithFallbacks": "fallbacks",
"RunnableWithMessageHistory": "history",
"RunnableAssign": "passthrough",
"RunnablePassthrough": "passthrough",
"RunnablePick": "passthrough",
"RouterInput": "router",
"RouterRunnable": "router",
"AddableDict": "utils",
"ConfigurableField": "utils",
"ConfigurableFieldMultiOption": "utils",
"ConfigurableFieldSingleOption": "utils",
"ConfigurableFieldSpec": "utils",
"aadd": "utils",
"add": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
from typing import MutableSequence, TYPE_CHECKING, Union, Iterable
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import T
class BaseDocumentArray(MutableSequence[Document]):
def __init__(self, *args, storage: str = 'memory', **kwargs):
super().__init__()
self._init_storage(*args, **kwargs)
def __add__(self: 'T', other: Union['Document', Iterable['Document']]) -> 'T':
v = type(self)(self)
v.extend(other)
return v
|
from typing import MutableSequence, TYPE_CHECKING, Union, Iterable
from .. import Document
if TYPE_CHECKING:
from ..typing import T
class BaseDocumentArray(MutableSequence[Document]):
def __init__(self, *args, storage: str = 'memory', **kwargs):
super().__init__()
self._init_storage(*args, **kwargs)
def __add__(self: 'T', other: Union['Document', Iterable['Document']]) -> 'T':
v = type(self)(self)
v.extend(other)
return v
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# mangage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# mangage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# mangage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# mangage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIM_WRAPPER_CONSTRUCTORS = Registry('optimizer wrapper constructor')
# mangage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
# manage optimizer wrapper
OPTIM_WRAPPERS = Registry('optim_wrapper')
|
# Copyright (c) OpenMMLab. All rights reserved.
"""MMEngine provides 11 root registries to support using modules across
projects.
More datails can be found at
https://mmengine.readthedocs.io/en/latest/tutorials/registry.html.
"""
from .registry import Registry
# manage all kinds of runners like `EpochBasedRunner` and `IterBasedRunner`
RUNNERS = Registry('runner')
# manage runner constructors that define how to initialize runners
RUNNER_CONSTRUCTORS = Registry('runner constructor')
# manage all kinds of loops like `EpochBasedTrainLoop`
LOOPS = Registry('loop')
# manage all kinds of hooks like `CheckpointHook`
HOOKS = Registry('hook')
# manage data-related modules
DATASETS = Registry('dataset')
DATA_SAMPLERS = Registry('data sampler')
TRANSFORMS = Registry('transform')
# mangage all kinds of modules inheriting `nn.Module`
MODELS = Registry('model')
# mangage all kinds of model wrappers like 'MMDistributedDataParallel'
MODEL_WRAPPERS = Registry('model_wrapper')
# mangage all kinds of weight initialization modules like `Uniform`
WEIGHT_INITIALIZERS = Registry('weight initializer')
# mangage all kinds of optimizers like `SGD` and `Adam`
OPTIMIZERS = Registry('optimizer')
# manage constructors that customize the optimization hyperparameters.
OPTIMIZER_CONSTRUCTORS = Registry('optimizer constructor')
# mangage all kinds of parameter schedulers like `MultiStepLR`
PARAM_SCHEDULERS = Registry('parameter scheduler')
# manage all kinds of metrics
METRICS = Registry('metric')
# manage task-specific modules like anchor generators and box coders
TASK_UTILS = Registry('task util')
# manage visualizer
VISUALIZERS = Registry('visualizer')
# manage visualizer backend
VISBACKENDS = Registry('vis_backend')
# manage logprocessor
LOG_PROCESSORS = Registry('log_processor')
|
from langchain_core.tracers.schemas import (
BaseRun,
ChainRun,
LLMRun,
Run,
RunTypeEnum,
ToolRun,
TracerSession,
TracerSessionBase,
TracerSessionV1,
TracerSessionV1Base,
TracerSessionV1Create,
)
__all__ = [
"BaseRun",
"ChainRun",
"LLMRun",
"Run",
"RunTypeEnum",
"ToolRun",
"TracerSession",
"TracerSessionBase",
"TracerSessionV1",
"TracerSessionV1Base",
"TracerSessionV1Create",
]
|
from langchain_core.tracers.schemas import (
BaseRun,
ChainRun,
LLMRun,
Run,
RunTypeEnum,
ToolRun,
TracerSession,
TracerSessionBase,
TracerSessionV1,
TracerSessionV1Base,
TracerSessionV1Create,
)
__all__ = [
"RunTypeEnum",
"TracerSessionV1Base",
"TracerSessionV1Create",
"TracerSessionV1",
"TracerSessionBase",
"TracerSession",
"BaseRun",
"LLMRun",
"ChainRun",
"ToolRun",
"Run",
]
|
import warnings
from unittest import mock
import numpy as np
from conftest import skip_if_backend
from keras.src import backend
from keras.src import callbacks
from keras.src import layers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.utils import numerical_utils
try:
import requests
except ImportError:
requests = None
class TerminateOnNaNTest(testing.TestCase):
def test_RemoteMonitor(self):
if requests is None:
self.skipTest("`requests` required to run this test")
monitor = callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
warning_msg = "Could not reach RemoteMonitor root server"
with warnings.catch_warnings(record=True) as warning_logs:
warnings.simplefilter("always")
monitor.on_epoch_end(0, logs={"loss": 0.0})
self.assertIn(warning_msg, str(warning_logs[-1].message))
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1) # a 1 by 1 array
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
@skip_if_backend(
"openvino", "openvino backend does not support `fit` method"
)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest("`requests` required to run this test")
if backend.backend() == "numpy":
self.skipTest("Trainer not implemented from NumPy backend.")
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
NUM_CLASSES = 2
BATCH_SIZE = 4
np.random.seed(1337)
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
y_test = numerical_utils.to_categorical(y_test)
y_train = numerical_utils.to_categorical(y_train)
model = Sequential([layers.Dense(NUM_CLASSES)])
model.compile(loss="mean_squared_error", optimizer="sgd")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
hist = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=[monitor],
epochs=1,
)
send = {
"epoch": 0,
"loss": hist.history["loss"][0],
"val_loss": hist.history["val_loss"][0],
}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
|
import warnings
from unittest import mock
import numpy as np
from keras.src import backend
from keras.src import callbacks
from keras.src import layers
from keras.src import testing
from keras.src.models import Sequential
from keras.src.utils import numerical_utils
try:
import requests
except ImportError:
requests = None
class TerminateOnNaNTest(testing.TestCase):
def test_RemoteMonitor(self):
if requests is None:
self.skipTest("`requests` required to run this test")
monitor = callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
warning_msg = "Could not reach RemoteMonitor root server"
with warnings.catch_warnings(record=True) as warning_logs:
warnings.simplefilter("always")
monitor.on_epoch_end(0, logs={"loss": 0.0})
self.assertIn(warning_msg, str(warning_logs[-1].message))
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1) # a 1 by 1 array
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest("`requests` required to run this test")
if backend.backend() == "numpy":
self.skipTest("Trainer not implemented from NumPy backend.")
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
NUM_CLASSES = 2
BATCH_SIZE = 4
np.random.seed(1337)
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
y_test = numerical_utils.to_categorical(y_test)
y_train = numerical_utils.to_categorical(y_train)
model = Sequential([layers.Dense(NUM_CLASSES)])
model.compile(loss="mean_squared_error", optimizer="sgd")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
hist = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=[monitor],
epochs=1,
)
send = {
"epoch": 0,
"loss": hist.history["loss"][0],
"val_loss": hist.history["val_loss"][0],
}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSRLoss implements a combined loss function for Contrastive Sparse Representation (CSR) models.
This loss combines two components:
1. A reconstruction loss :class:`CSRReconstructionLoss` that ensures the sparse representation can faithfully
reconstruct the original embedding.
2. A contrastive learning component :class:`SparseMultipleNegativesRankingLoss` that ensures semantically
similar sentences have similar representations.
The total loss is linear combination of the two losses.
Args:
model: SparseEncoder model
beta: Weight for the L_aux component in the reconstruction loss
gamma: Weight for the contrastive MRL loss component
scale: Scale factor for the similarity scores in the MRL loss
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. Sentence pairs or triplets for the MRL component
2. Uses autoencoder components of the SparseEncoder model
Relations:
- Uses :class:`CSRReconstructionLoss` for the reconstruction component
- Uses :class:`SparseMultipleNegativesRankingLoss` for the contrastive component
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("sentence-transformers/all-MiniLM-L6-v2")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
}
)
loss = losses.CSRLoss(model, beta=0.1, gamma=1.0, scale=20.0)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
def __init__(self, model: SparseEncoder, beta: float = 0.1, gamma: float = 1.0, scale: float = 20.0):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = CSRReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding, labels)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta, "gamma": self.gamma, "scale": self.scale}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
from sentence_transformers.sparse_encoder.losses.CSRReconstructionLoss import CSRReconstructionLoss
from sentence_transformers.sparse_encoder.losses.SparseMultipleNegativesRankingLoss import (
SparseMultipleNegativesRankingLoss,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class CSRLoss(nn.Module):
"""
CSR Loss module that combines the CSRReconstruction Loss and Sparse Multiple Negatives Ranking Loss MRL (InfoNCE).
Based on the paper:
Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation, https://arxiv.org/abs/2503.01776
This module computes the combined loss according to the formula:
L_CSR = L_recon + γ * L_MRL
where:
- L_recon = L(k) + L(4k)/8 + β*L_aux
- L_MRL is the Multiple Negatives Ranking Loss
"""
def __init__(self, model: SparseEncoder, beta: float = 0.1, gamma: float = 1.0, scale: float = 20.0):
super().__init__()
self.model = model
self.beta = beta
self.gamma = gamma
self.scale = scale
# Initialize the component losses
self.reconstruction_loss = CSRReconstructionLoss(model, beta)
self.ranking_loss = SparseMultipleNegativesRankingLoss(model, scale)
def forward(
self, sentence_features: Iterable[dict[str, torch.Tensor]], labels: torch.Tensor = None
) -> dict[str, torch.Tensor]:
"""
Forward pass of the CSR Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings
labels: Optional tensor of labels (not used in this implementation)
Returns:
Dictionary containing the total loss and individual loss components
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
sentence_embedding = [output["sentence_embedding"] for output in outputs]
recon_loss = self.reconstruction_loss.compute_loss_from_embeddings(outputs)
ranking_loss = self.ranking_loss.compute_loss_from_embeddings(sentence_embedding, labels)
# Compute total loss: L_CSR = L_recon + γ * L_MRL
total_loss = recon_loss + self.gamma * ranking_loss
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta, "gamma": self.gamma, "scale": self.scale}
@property
def citation(self) -> str:
return """
@misc{wen2025matryoshkarevisitingsparsecoding,
title={Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation},
author={Tiansheng Wen and Yifei Wang and Zequn Zeng and Zhong Peng and Yudi Su and Xinyang Liu and Bo Chen and Hongwei Liu and Stefanie Jegelka and Chenyu You},
year={2025},
eprint={2503.01776},
archivePrefix={arXiv},
primaryClass={cs.LG},
url={https://arxiv.org/abs/2503.01776},
}
"""
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001),
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00066667,
by_epoch=False,
begin=0,
end=1500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='RandomShift', prob=0.5, max_shift_px=32),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8, num_workers=8, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='YOLOF',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='DilatedEncoder',
in_channels=2048,
out_channels=512,
block_mid_channels=128,
num_residual_blocks=4,
block_dilations=[2, 4, 6, 8]),
bbox_head=dict(
type='YOLOFHead',
num_classes=80,
in_channels=512,
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(
type='SGD',
lr=0.12,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(
norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)}))
lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667)
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKLWan,
FlowMatchEulerDiscreteScheduler,
WanPipeline,
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
floats_tensor,
require_peft_backend,
skip_mps,
)
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = WanPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 16,
"out_channels": 16,
"text_dim": 32,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
transformer_cls = WanTransformer3DModel
vae_kwargs = {
"base_dim": 3,
"z_dim": 16,
"dim_mult": [1, 1, 1, 1],
"num_res_blocks": 1,
"temperal_downsample": [False, True, True],
}
vae_cls = AutoencoderKLWan
has_two_text_encoders = True
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 9, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 9
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
sizes = (4, 4)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"num_frames": num_frames,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Wan.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKLWan,
FlowMatchEulerDiscreteScheduler,
WanPipeline,
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import floats_tensor, require_peft_backend, skip_mps
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests # noqa: E402
@require_peft_backend
@skip_mps
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
pipeline_class = WanPipeline
scheduler_cls = FlowMatchEulerDiscreteScheduler
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
scheduler_kwargs = {}
transformer_kwargs = {
"patch_size": (1, 2, 2),
"num_attention_heads": 2,
"attention_head_dim": 12,
"in_channels": 16,
"out_channels": 16,
"text_dim": 32,
"freq_dim": 256,
"ffn_dim": 32,
"num_layers": 2,
"cross_attn_norm": True,
"qk_norm": "rms_norm_across_heads",
"rope_max_seq_len": 32,
}
transformer_cls = WanTransformer3DModel
vae_kwargs = {
"base_dim": 3,
"z_dim": 16,
"dim_mult": [1, 1, 1, 1],
"num_res_blocks": 1,
"temperal_downsample": [False, True, True],
}
vae_cls = AutoencoderKLWan
has_two_text_encoders = True
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
text_encoder_target_modules = ["q", "k", "v", "o"]
@property
def output_shape(self):
return (1, 9, 32, 32, 3)
def get_dummy_inputs(self, with_generator=True):
batch_size = 1
sequence_length = 16
num_channels = 4
num_frames = 9
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
sizes = (4, 4)
generator = torch.manual_seed(0)
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
pipeline_inputs = {
"prompt": "",
"num_frames": num_frames,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"height": 32,
"width": 32,
"max_sequence_length": sequence_length,
"output_type": "np",
}
if with_generator:
pipeline_inputs.update({"generator": generator})
return noise, input_ids, pipeline_inputs
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
def test_simple_inference_with_text_denoiser_lora_unfused(self):
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale(self):
pass
@unittest.skip("Not supported in Wan.")
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
pass
@unittest.skip("Not supported in Wan.")
def test_modify_padding_mode(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_partial_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_and_scale(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_fused(self):
pass
@unittest.skip("Text encoder LoRA is not supported in Wan.")
def test_simple_inference_with_text_lora_save_load(self):
pass
|
import asyncio
from itertools import cycle
from typing import Any, Optional, Union
from uuid import UUID
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from typing_extensions import override
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
class MyCustomAsyncHandler(AsyncCallbackHandler):
@override
async def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
@override
async def on_llm_new_token(
self,
token: str,
*,
chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 1000))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark
def async_callbacks() -> None:
for _ in range(10):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
# ruff: noqa: ARG002
import asyncio
from itertools import cycle
from typing import Any
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore
from langchain_core.callbacks.base import AsyncCallbackHandler
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
class MyCustomAsyncHandler(AsyncCallbackHandler):
async def on_chat_model_start(
self,
serialized: Any,
messages: Any,
*,
run_id: Any,
parent_run_id: Any = None,
tags: Any = None,
metadata: Any = None,
**kwargs: Any,
) -> Any:
# Do nothing
# Required to implement since this is an abstract method
pass
async def on_llm_new_token(
self,
token: str,
*,
chunk: Any = None,
run_id: Any,
parent_run_id: Any = None,
tags: Any = None,
**kwargs: Any,
) -> None:
await asyncio.sleep(0)
@pytest.mark.benchmark
async def test_async_callbacks(benchmark: BenchmarkFixture) -> None:
infinite_cycle = cycle([AIMessage(content=" ".join(["hello", "goodbye"] * 1000))])
model = GenericFakeChatModel(messages=infinite_cycle)
@benchmark
def async_callbacks() -> None:
for _ in range(10):
for _ in model.stream("meow", {"callbacks": [MyCustomAsyncHandler()]}):
pass
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for *YesNo* [:footcite:`YesNo`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "waves_yesno",
"url": "http://www.openslr.org/resources/1/waves_yesno.tar.gz",
"checksum": "c3f49e0cca421f96b75b41640749167b52118f232498667ca7a5f9416aef8e73",
}
}
class YESNO(Dataset):
"""Create a Dataset for *YesNo* [:footcite:`YesNo`].
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://www.openslr.org/resources/1/waves_yesno.tar.gz"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"waves_yesno"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(
self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False,
) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
archive = os.path.basename(url)
archive = root / archive
self._path = root / folder_in_archive
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url_to_file(url, archive, hash_prefix=checksum)
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
self._walker = sorted(str(p.stem) for p in Path(self._path).glob("*.wav"))
def _load_item(self, fileid: str, path: str):
labels = [int(c) for c in fileid.split("_")]
file_audio = os.path.join(path, fileid + ".wav")
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, labels
def __getitem__(self, n: int) -> Tuple[Tensor, int, List[int]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, List[int]): ``(waveform, sample_rate, labels)``
"""
fileid = self._walker[n]
item = self._load_item(fileid, self._path)
return item
def __len__(self) -> int:
return len(self._walker)
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
import logging
import sys
import os
import tarfile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as second parameter and the corpus has a size of approx 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
passage = passage
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performances metrices for cossine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
import logging
import sys
import os
import tarfile
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as second parameter and the corpus has a size of approx 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.blob.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
passage = passage
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
|
from typing import Iterable, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain_community.document_loaders.web_base import WebBaseLoader
class YahooFinanceNewsInput(BaseModel):
"""Input for the YahooFinanceNews tool."""
query: str = Field(description="company ticker query to look up")
class YahooFinanceNewsTool(BaseTool):
"""Tool that searches financial news on Yahoo Finance."""
name: str = "yahoo_finance_news"
description: str = (
"Useful for when you need to find financial news "
"about a public company. "
"Input should be a company ticker. "
"For example, AAPL for Apple, MSFT for Microsoft."
)
top_k: int = 10
"""The number of results to return."""
args_schema: Type[BaseModel] = YahooFinanceNewsInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""
Use the Yahoo Finance News tool.
Args:
query: Company ticker symbol (e.g., 'AAPL' for Apple).
run_manager: Optional callback manager.
Returns:
str: Formatted news results or error message.
"""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f"Company ticker {query} not found."
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {query} not found."
links = []
try:
links = [
n["content"]["canonicalUrl"]["url"]
for n in company.news
if n["content"]["contentType"] == "STORY"
]
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return f"No news found for company that searched with {query} ticker."
if not links:
return f"No news found for company that searched with {query} ticker."
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
return f"No news found for company that searched with {query} ticker."
return result
@staticmethod
def _format_results(docs: Iterable[Document], query: str) -> str:
doc_strings = [
"\n".join([doc.metadata["title"], doc.metadata.get("description", "")])
for doc in docs
if query in doc.metadata.get("description", "")
or query in doc.metadata["title"]
]
return "\n\n".join(doc_strings)
|
from typing import Iterable, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain_community.document_loaders.web_base import WebBaseLoader
class YahooFinanceNewsInput(BaseModel):
"""Input for the YahooFinanceNews tool."""
query: str = Field(description="company ticker query to look up")
class YahooFinanceNewsTool(BaseTool): # type: ignore[override, override]
"""Tool that searches financial news on Yahoo Finance."""
name: str = "yahoo_finance_news"
description: str = (
"Useful for when you need to find financial news "
"about a public company. "
"Input should be a company ticker. "
"For example, AAPL for Apple, MSFT for Microsoft."
)
top_k: int = 10
"""The number of results to return."""
args_schema: Type[BaseModel] = YahooFinanceNewsInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""
Use the Yahoo Finance News tool.
Args:
query: Company ticker symbol (e.g., 'AAPL' for Apple).
run_manager: Optional callback manager.
Returns:
str: Formatted news results or error message.
"""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
company = yfinance.Ticker(query)
try:
if company.isin is None:
return f"Company ticker {query} not found."
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {query} not found."
links = []
try:
links = [
n["content"]["canonicalUrl"]["url"]
for n in company.news
if n["content"]["contentType"] == "STORY"
]
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return f"No news found for company that searched with {query} ticker."
if not links:
return f"No news found for company that searched with {query} ticker."
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, query)
if not result:
return f"No news found for company that searched with {query} ticker."
return result
@staticmethod
def _format_results(docs: Iterable[Document], query: str) -> str:
doc_strings = [
"\n".join([doc.metadata["title"], doc.metadata.get("description", "")])
for doc in docs
if query in doc.metadata.get("description", "")
or query in doc.metadata["title"]
]
return "\n\n".join(doc_strings)
|
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F
def plot(imgs, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
boxes = None
masks = None
if isinstance(img, tuple):
img, target = img
if isinstance(target, dict):
boxes = target.get("boxes")
masks = target.get("masks")
elif isinstance(target, tv_tensors.BoundingBoxes):
boxes = target
else:
raise ValueError(f"Unexpected target type: {type(target)}")
img = F.to_image(img)
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
img = F.to_dtype(img, torch.uint8, scale=True)
if boxes is not None:
img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
if masks is not None:
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy(), **imshow_kwargs)
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if row_title is not None:
for row_idx in range(num_rows):
axs[row_idx, 0].set(ylabel=row_title[row_idx])
plt.tight_layout()
|
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
boxes = None
masks = None
if isinstance(img, tuple):
img, target = img
if isinstance(target, dict):
boxes = target.get("boxes")
masks = target.get("masks")
elif isinstance(target, datapoints.BoundingBoxes):
boxes = target
else:
raise ValueError(f"Unexpected target type: {type(target)}")
img = F.to_image(img)
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
img = F.to_dtype(img, torch.uint8, scale=True)
if boxes is not None:
img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
if masks is not None:
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy(), **imshow_kwargs)
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if row_title is not None:
for row_idx in range(num_rows):
axs[row_idx, 0].set(ylabel=row_title[row_idx])
plt.tight_layout()
|
from typing import Any, cast
import pytest
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.workflow.context_serializers import JsonSerializer
from llama_index.core.workflow.events import Event
class _TestEvent(Event):
param: str
_private_param_1: str = PrivateAttr()
_private_param_2: str = PrivateAttr(default_factory=str)
class _TestEvent2(Event):
"""Custom Test Event.
Private Attrs:
_private_param: doesn't get modified during construction
_modified_private_param: gets processed before being set
"""
_private_param: int = PrivateAttr()
_modified_private_param: int = PrivateAttr()
def __init__(self, _modified_private_param: int, **params: Any):
super().__init__(**params)
self._modified_private_param = _modified_private_param * 2
def test_event_init_basic():
evt = Event(a=1, b=2, c="c")
assert evt.a == 1
assert evt.b == 2
assert evt.c == "c"
assert evt["a"] == evt.a
assert evt["b"] == evt.b
assert evt["c"] == evt.c
assert evt.keys() == {"a": 1, "b": 2, "c": "c"}.keys()
def test_custom_event_with_fields_and_private_params():
evt = _TestEvent(a=1, param="test_param", _private_param_1="test_private_param_1") # type: ignore
assert evt.a == 1
assert evt["a"] == evt.a
assert evt.param == "test_param"
assert evt._data == {"a": 1}
assert evt._private_param_1 == "test_private_param_1"
assert evt._private_param_2 == ""
def test_custom_event_override_init():
evt = _TestEvent2(a=1, b=2, _private_param=2, _modified_private_param=2)
assert evt.a == 1
assert evt.b == 2
assert evt._data == {"a": 1, "b": 2}
assert evt._private_param == 2
assert evt._modified_private_param == 4
def test_event_missing_key():
ev = _TestEvent(param="bar")
with pytest.raises(AttributeError):
ev.wrong_key
def test_event_not_a_field():
ev = _TestEvent(param="foo", not_a_field="bar") # type: ignore
assert ev._data["not_a_field"] == "bar"
ev.not_a_field = "baz"
assert ev._data["not_a_field"] == "baz"
ev["not_a_field"] = "barbaz"
assert ev._data["not_a_field"] == "barbaz"
assert ev.get("not_a_field") == "barbaz"
def test_event_dict_api():
ev = _TestEvent(param="foo")
assert len(ev) == 0
ev["a_new_key"] = "bar"
assert len(ev) == 1
assert list(ev.values()) == ["bar"]
k, v = next(iter(ev.items()))
assert k == "a_new_key"
assert v == "bar"
assert next(iter(ev)) == "a_new_key"
assert ev.dict() == {"a_new_key": "bar"}
def test_event_serialization():
ev = _TestEvent(param="foo", not_a_field="bar") # type: ignore
serializer = JsonSerializer()
serialized_ev = serializer.serialize(ev)
deseriazlied_ev = serializer.deserialize(serialized_ev)
assert type(deseriazlied_ev).__name__ == type(ev).__name__
deseriazlied_ev = cast(
_TestEvent,
deseriazlied_ev,
)
assert ev.param == deseriazlied_ev.param
assert ev._data == deseriazlied_ev._data
def test_bool():
assert bool(_TestEvent(param="foo")) is True
|
from typing import Any, cast
import pytest
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.workflow.context_serializers import JsonSerializer
from llama_index.core.workflow.events import Event
class _TestEvent(Event):
param: str
_private_param_1: str = PrivateAttr()
_private_param_2: str = PrivateAttr(default_factory=str)
class _TestEvent2(Event):
"""Custom Test Event.
Private Attrs:
_private_param: doesn't get modified during construction
_modified_private_param: gets processed before being set
"""
_private_param: int = PrivateAttr()
_modified_private_param: int = PrivateAttr()
def __init__(self, _modified_private_param: int, **params: Any):
super().__init__(**params)
self._modified_private_param = _modified_private_param * 2
def test_event_init_basic():
evt = Event(a=1, b=2, c="c")
assert evt.a == 1
assert evt.b == 2
assert evt.c == "c"
assert evt["a"] == evt.a
assert evt["b"] == evt.b
assert evt["c"] == evt.c
assert evt.keys() == {"a": 1, "b": 2, "c": "c"}.keys()
def test_custom_event_with_fields_and_private_params():
evt = _TestEvent(a=1, param="test_param", _private_param_1="test_private_param_1") # type: ignore
assert evt.a == 1
assert evt["a"] == evt.a
assert evt.param == "test_param"
assert evt._data == {"a": 1}
assert evt._private_param_1 == "test_private_param_1"
assert evt._private_param_2 == ""
def test_custom_event_override_init():
evt = _TestEvent2(a=1, b=2, _private_param=2, _modified_private_param=2)
assert evt.a == 1
assert evt.b == 2
assert evt._data == {"a": 1, "b": 2}
assert evt._private_param == 2
assert evt._modified_private_param == 4
def test_event_missing_key():
ev = _TestEvent(param="bar")
with pytest.raises(AttributeError):
ev.wrong_key
def test_event_not_a_field():
ev = _TestEvent(param="foo", not_a_field="bar") # type: ignore
assert ev._data["not_a_field"] == "bar"
ev.not_a_field = "baz"
assert ev._data["not_a_field"] == "baz"
ev["not_a_field"] = "barbaz"
assert ev._data["not_a_field"] == "barbaz"
assert ev.get("not_a_field") == "barbaz"
def test_event_dict_api():
ev = _TestEvent(param="foo")
assert len(ev) == 0
ev["a_new_key"] = "bar"
assert len(ev) == 1
assert list(ev.values()) == ["bar"]
k, v = next(iter(ev.items()))
assert k == "a_new_key"
assert v == "bar"
assert next(iter(ev)) == "a_new_key"
assert ev.dict() == {"a_new_key": "bar"}
def test_event_serialization():
ev = _TestEvent(param="foo", not_a_field="bar") # type: ignore
serializer = JsonSerializer()
serialized_ev = serializer.serialize(ev)
deseriazlied_ev = serializer.deserialize(serialized_ev)
assert type(deseriazlied_ev).__name__ == type(ev).__name__
deseriazlied_ev = cast(
_TestEvent,
deseriazlied_ev,
)
assert ev.param == deseriazlied_ev.param
assert ev._data == deseriazlied_ev._data
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(i,j)-s(k,l))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCoSENTLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from __future__ import annotations
from typing import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer
class MSELoss(nn.Module):
def __init__(self, model: SentenceTransformer) -> None:
"""
Computes the MSE loss between the computed sentence embedding and a target sentence embedding. This loss
is used when extending sentence embeddings to new languages as described in our publication
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation.
For an example, see `the distillation documentation <../../examples/training/distillation/README.html>`_ on extending language models to new languages.
Args:
model: SentenceTransformerModel
References:
- Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation: https://arxiv.org/abs/2004.09813
- `Training > Model Distillation <../../examples/training/distillation/README.html>`_
- `Training > Multilingual Models <../../examples/training/multilingual/README.html>`_
Requirements:
1. Usually uses a finetuned teacher M in a knowledge distillation setup
Inputs:
+-----------------------------------------+-----------------------------+
| Texts | Labels |
+=========================================+=============================+
| sentence | model sentence embeddings |
+-----------------------------------------+-----------------------------+
| sentence_1, sentence_2, ..., sentence_N | model sentence embeddings |
+-----------------------------------------+-----------------------------+
Relations:
- :class:`MarginMSELoss` is equivalent to this loss, but with a margin through a negative pair.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
student_model = SentenceTransformer("microsoft/mpnet-base")
teacher_model = SentenceTransformer("all-mpnet-base-v2")
train_dataset = Dataset.from_dict({
"english": ["The first sentence", "The second sentence", "The third sentence", "The fourth sentence"],
"french": ["La première phrase", "La deuxième phrase", "La troisième phrase", "La quatrième phrase"],
})
def compute_labels(batch):
return {
"label": teacher_model.encode(batch["english"])
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = losses.MSELoss(student_model)
trainer = SentenceTransformerTrainer(
model=student_model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.model = model
self.loss_fct = nn.MSELoss()
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Concatenate multiple inputs on the batch dimension
if len(sentence_features) > 1:
embeddings = torch.cat([self.model(inputs)["sentence_embedding"] for inputs in sentence_features], dim=0)
# Repeat the labels for each input
return self.loss_fct(embeddings, labels.repeat(len(sentence_features), 1))
embeddings = self.model(sentence_features[0])["sentence_embedding"]
return self.loss_fct(embeddings, labels)
@property
def citation(self) -> str:
return """
@inproceedings{reimers-2020-multilingual-sentence-bert,
title = "Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2020",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/2004.09813",
}
"""
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
# delete the root document
self._del_docs_by_ids([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
self._annlite.close()
super().__del__()
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
from typing import Iterable, Dict
from docarray.array.storage.annlite.helper import OffsetMapping
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document, DocumentArray
class GetSetDelMixin(BaseGetSetDelMixin):
"""Implement required and derived functions that power `getitem`, `setitem`, `delitem`"""
# essential methods start
def _get_doc_by_id(self, _id: str) -> 'Document':
doc = self._annlite.get_doc_by_id(_id)
if doc is None:
raise KeyError(f'Can not find Document with id=`{_id}`')
return doc
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
value.embedding = self._map_embedding(value.embedding)
docs = DocumentArrayInMemory([value])
self._annlite.update(docs)
def _del_doc_by_id(self, _id: str):
# delete the root document
self._del_docs_by_ids([_id])
def _clear_storage(self):
self._annlite.clear()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
for _id, doc in zip(ids, docs):
doc.embedding = self._map_embedding(doc.embedding)
self._set_doc_by_id(_id, doc)
def _del_docs_by_ids(self, ids):
self._annlite.delete(ids)
def _load_offset2ids(self):
self._offsetmapping = OffsetMapping(
data_path=self._config.data_path, in_memory=False
)
self._offsetmapping.create_table()
self._offset2ids = Offset2ID(self._offsetmapping.get_all_ids())
def _save_offset2ids(self):
self._offsetmapping.drop()
self._offsetmapping.create_table()
self._offsetmapping._insert(
[(i, doc_id) for i, doc_id in enumerate(self._offset2ids.ids)]
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.