# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix (LMDB and Memcached
# are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (named `file_client_args` in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
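# Editor's note: a minimal sketch (not part of the original config) of how a
# config like this is typically loaded and inspected, assuming it is saved at
# the hypothetical path 'configs/_base_/datasets/coco_panoptic.py'.
#
# from mmengine.config import Config
# cfg = Config.fromfile('configs/_base_/datasets/coco_panoptic.py')
# print(cfg.train_dataloader.batch_size)  # 2
# print(cfg.val_evaluator.type)           # 'CocoPanopticMetric'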
|
# dataset settings
dataset_type = 'CocoPanopticDataset'
# data_root = 'data/coco/'
# Example of using a different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer the backend from the path prefix (LMDB and Memcached
# are not supported yet)
data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: use `backend_args` (named `file_client_args` in versions before 3.0.0rc6)
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadPanopticAnnotations', backend_args=backend_args),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_train2017.json',
data_prefix=dict(
img='train2017/', seg='annotations/panoptic_train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/panoptic_val2017.json',
data_prefix=dict(img='val2017/', seg='annotations/panoptic_val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoPanopticMetric',
ann_file=data_root + 'annotations/panoptic_val2017.json',
seg_prefix=data_root + 'annotations/panoptic_val2017/',
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=1,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file='annotations/panoptic_image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoPanopticMetric',
# format_only=True,
# ann_file=data_root + 'annotations/panoptic_image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_panoptic/test')
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Router import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
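# Editor's note: a minimal usage sketch, not part of the original module. It
# assumes the Hugging Face Hub is reachable to download the default
# 'openai/clip-vit-base-patch32' weights.
if __name__ == "__main__":
    clip = CLIPModel()
    # `tokenize` accepts a mixed batch of strings and PIL images.
    batch = clip.tokenize(["a photo of a cat", Image.new("RGB", (224, 224))])
    embeddings = clip(batch)["sentence_embedding"]
    print(embeddings.shape)  # (2, 512) for the base patch32 checkpoint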
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Asym import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic for indexing data into a vectorstore
while avoiding duplicated content and avoiding overwriting content that
is unchanged.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = [
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
]
_dynamic_imports = {
"aindex": "api",
"index": "api",
"IndexingResult": "api",
"DeleteResponse": "base",
"DocumentIndex": "base",
"InMemoryRecordManager": "base",
"RecordManager": "base",
"UpsertResponse": "base",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
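# Editor's note: the same PEP 562 lazy-import pattern in miniature (a sketch
# with a hypothetical package name, not part of this module). Module-level
# __getattr__ only runs when normal lookup fails, so each submodule is imported
# on first attribute access and then cached in globals().
#
# # lazy_pkg/__init__.py
# from importlib import import_module
#
# _dynamic_imports = {"helper": "impl"}  # attribute name -> submodule name
#
# def __getattr__(attr_name: str) -> object:
#     module = import_module(f".{_dynamic_imports[attr_name]}", package=__name__)
#     result = getattr(module, attr_name)
#     globals()[attr_name] = result  # cached: __getattr__ won't fire again
#     return result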
|
"""Code to help indexing data into a vectorstore.
This package contains helper logic for indexing data into a vectorstore
while avoiding duplicated content and avoiding overwriting content that
is unchanged.
"""
from langchain_core.indexing.api import IndexingResult, aindex, index
from langchain_core.indexing.base import (
DeleteResponse,
DocumentIndex,
InMemoryRecordManager,
RecordManager,
UpsertResponse,
)
__all__ = [
"aindex",
"DeleteResponse",
"DocumentIndex",
"index",
"IndexingResult",
"InMemoryRecordManager",
"RecordManager",
"UpsertResponse",
]
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from pathlib import Path
import torch
from utils import (
create_tsv,
dump_features,
learn_kmeans,
get_km_label,
)
def _init_logger(debug=False):
message_fmt = "%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", choices=["mfcc", "hubert"], type=str)
parser.add_argument(
"--layer-index",
default=6,
type=int,
help="The layer index in HuBERT model for feature extraction. (``1`` means the first layer output)",
)
parser.add_argument(
"--checkpoint-path",
default=None,
type=Path,
help="The model checkpoint of hubert_pretrain_base model.",
)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
if args.feat_type == "mfcc":
data_dir = args.exp_dir / "data" / "mfcc"
else:
data_dir = args.exp_dir / "data" / f"{args.feat_type}_{args.layer_index}"
data_dir.mkdir(parents=True, exist_ok=True)
tsv_dir = data_dir / "tsv"
feat_dir = data_dir / "feat"
km_dir = data_dir / "km_model"
label_dir = data_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
for rank in range(1, args.num_rank + 1):
dump_features(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
args.layer_index,
args.checkpoint_path,
16_000,
)
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC or HuBERT features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
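# Editor's note: a hedged sketch of invoking the pipeline programmatically with
# an argparse.Namespace (paths are hypothetical placeholders); equivalent to
# passing the same values on the command line.
#
# from argparse import Namespace
# main(Namespace(debug=False, dataset="librispeech", root_dir=Path("/datasets"),
#                num_rank=5, feat_type="mfcc", layer_index=6, checkpoint_path=None,
#                use_gpu=False, exp_dir=Path("exp"), num_cluster=100))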
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from multiprocessing import Pool
from pathlib import Path
import torch
from utils import (
create_tsv,
dump_features,
learn_kmeans,
get_km_label,
)
def _init_logger(debug=False):
message_fmt = "%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", choices=["mfcc", "hubert"], type=str)
parser.add_argument(
"--layer-index",
default=6,
type=int,
help="The layer index in HuBERT model for feature extraction. (``1`` means the first layer output)",
)
parser.add_argument(
"--checkpoint-path",
default=None,
type=Path,
help="The model checkpoint of hubert_pretrain_base model.",
)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
if args.feat_type == "mfcc":
data_dir = args.exp_dir / "data" / "mfcc"
else:
data_dir = args.exp_dir / "data" / f"{args.feat_type}_{args.layer_index}"
data_dir.mkdir(parents=True, exist_ok=True)
tsv_dir = data_dir / "tsv"
feat_dir = data_dir / "feat"
km_dir = data_dir / "km_model"
label_dir = data_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
p = Pool(args.num_rank)
inputs = [
(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
args.layer_index,
args.checkpoint_path,
16_000,
)
for rank in range(1, args.num_rank + 1)
]
_ = p.starmap(dump_features, inputs)
p.close()
p.join()
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC or HuBERT features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
|
# Owner(s): ["module: inductor"]
import ctypes
import torch
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.codecache import CUDACodeCache
from torch._inductor.codegen.cuda.cuda_env import nvcc_exist
from torch._inductor.exc import CUDACompileError
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import fresh_cache
_SOURCE_CODE = r"""
#include <stdio.h>
__global__
void saxpy_device(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
extern "C" {
__attribute__((__visibility__("default")))
int saxpy(int n, float a, float *x, float *y) {
// Perform SAXPY
saxpy_device<<<(n+255)/256, 256>>>(n, a, x, y);
return 0;
}
}
"""
class TestCUDACodeCache(InductorTestCase):
def test_cuda_load(self):
with fresh_cache():
# Test both .o and .so compilation.
(
object_file_path,
object_hash_key,
source_code_path0,
) = CUDACodeCache.compile(_SOURCE_CODE, "o")
dll_wrapper, so_hash_key, source_code_path1 = CUDACodeCache.load(
_SOURCE_CODE, "so"
)
self.assertEqual(source_code_path0, source_code_path1)
self.assertEqual(object_hash_key, so_hash_key)
# Test load and call functions in .so.
x = torch.rand(10).float().cuda()
y = torch.rand(10).float().cuda()
a = 5.0
expected_y = a * x + y
dll_wrapper.saxpy(
ctypes.c_int(10),
ctypes.c_float(a),
ctypes.c_void_p(x.data_ptr()),
ctypes.c_void_p(y.data_ptr()),
)
torch.testing.assert_close(y, expected_y)
def test_compilation_error(self):
with fresh_cache():
error_source_code = _SOURCE_CODE.replace("saxpy_device", "saxpy_wrong", 1)
with self.assertRaises(CUDACompileError):
CUDACodeCache.compile(error_source_code, "o")
def test_async_compile(self):
with fresh_cache():
async_compile = AsyncCompile()
compiled_res = async_compile.cuda(_SOURCE_CODE, "so")
async_compile.wait(globals())
# Test load and call functions in .so.
x = torch.rand(5).float().cuda()
y = torch.rand(5).float().cuda()
a = 2.0
expected_y = a * x + y
compiled_res.result().saxpy(
ctypes.c_int(5),
ctypes.c_float(a),
ctypes.c_void_p(x.data_ptr()),
ctypes.c_void_p(y.data_ptr()),
)
torch.testing.assert_close(y, expected_y)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if nvcc_exist():
run_tests("cuda")
|
# Owner(s): ["module: inductor"]
import ctypes
import unittest
import torch
from torch._inductor import config
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.codecache import CUDACodeCache
from torch._inductor.codegen.cuda.cuda_env import nvcc_exist
from torch._inductor.exc import CUDACompileError
from torch._inductor.test_case import TestCase as InductorTestCase
from torch._inductor.utils import fresh_cache
_SOURCE_CODE = r"""
#include <stdio.h>
__global__
void saxpy_device(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
extern "C" {
__attribute__((__visibility__("default")))
int saxpy(int n, float a, float *x, float *y) {
// Perform SAXPY
saxpy_device<<<(n+255)/256, 256>>>(n, a, x, y);
return 0;
}
}
"""
@unittest.skipIf(config.is_fbcode(), "fbcode requires different CUDA_HOME setup")
class TestCUDACodeCache(InductorTestCase):
def test_cuda_load(self):
with fresh_cache():
# Test both .o and .so compilation.
(
object_file_path,
object_hash_key,
source_code_path0,
) = CUDACodeCache.compile(_SOURCE_CODE, "o")
dll_wrapper, so_hash_key, source_code_path1 = CUDACodeCache.load(
_SOURCE_CODE, "so"
)
self.assertNotEqual(source_code_path0, source_code_path1)
self.assertNotEqual(object_hash_key, so_hash_key)
# Test load and call functions in .so.
x = torch.rand(10).float().cuda()
y = torch.rand(10).float().cuda()
a = 5.0
expected_y = a * x + y
dll_wrapper.saxpy(
ctypes.c_int(10),
ctypes.c_float(a),
ctypes.c_void_p(x.data_ptr()),
ctypes.c_void_p(y.data_ptr()),
)
torch.testing.assert_close(y, expected_y)
def test_compilation_error(self):
with fresh_cache():
error_source_code = _SOURCE_CODE.replace("saxpy_device", "saxpy_wrong", 1)
with self.assertRaises(CUDACompileError):
CUDACodeCache.compile(error_source_code, "o")
def test_async_compile(self):
with fresh_cache():
async_compile = AsyncCompile()
compiled_res = async_compile.cuda(_SOURCE_CODE, "so")
async_compile.wait(globals())
# Test load and call functions in .so.
x = torch.rand(5).float().cuda()
y = torch.rand(5).float().cuda()
a = 2.0
expected_y = a * x + y
compiled_res.result().saxpy(
ctypes.c_int(5),
ctypes.c_float(a),
ctypes.c_void_p(x.data_ptr()),
ctypes.c_void_p(y.data_ptr()),
)
torch.testing.assert_close(y, expected_y)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if nvcc_exist():
run_tests("cuda")
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
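# Editor's note: since AutoAssign is registered in MODELS, it is normally built
# from a config dict rather than constructed directly. A hedged outline with
# the sub-configs elided as placeholders (not a runnable config as written):
#
# from mmdet.registry import MODELS
# model = MODELS.build(dict(
#     type='AutoAssign',
#     backbone=dict(type='ResNet', depth=50, ...),  # placeholder sub-config
#     neck=dict(type='FPN', ...),
#     bbox_head=dict(type='AutoAssignHead', ...)))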
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class AutoAssign(SingleStageDetector):
"""Implementation of `AutoAssign: Differentiable Label Assignment for Dense
Object Detection <https://arxiv.org/abs/2007.03496>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least a 4-dimensional tensor, but got {tensor.ndim} dimensions")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
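# Editor's note: a small usage sketch, not part of the original file. A Video
# wraps a tensor with at least 4 dimensions, conventionally [..., T, C, H, W].
#
# video = Video(torch.rand(8, 3, 32, 32))  # 8 frames of 3x32x32
# print(video.shape)                       # torch.Size([8, 3, 32, 32])
# Video(torch.rand(3, 32, 32))             # raises ValueError: fewer than 4 dims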
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the video. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected at least a 4-dimensional tensor, but got {tensor.ndim} dimensions")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
_VideoType = Union[torch.Tensor, Video]
_VideoTypeJIT = torch.Tensor
_TensorVideoType = Union[torch.Tensor, Video]
_TensorVideoTypeJIT = torch.Tensor
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
    """Distillation loss that aligns a student's query-passage score distribution
    with precomputed teacher scores via KL divergence (see citation below)."""
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
embeddings_query = embeddings[0]
embeddings_pos = embeddings[1]
embeddings_negs = embeddings[2:]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
if len(embeddings_negs) == 1:
# Single negative case
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_negs[0])
else:
# Multiple negatives case
student_scores_neg = torch.stack(
[self.similarity_fct(embeddings_query, neg) for neg in embeddings_negs],
dim=1,
)
# Teacher scores
teacher_pos_scores = labels[:, 0].unsqueeze(1)
teacher_neg_scores = labels[:, 1:]
# Prepare student scores to match teacher scores shape
student_scores_pos = student_scores_pos.unsqueeze(1)
# Create log probabilities for student scores
if len(embeddings_negs) == 1:
student_scores_neg = student_scores_neg.unsqueeze(1)
student_scores = torch.cat([student_scores_pos, student_scores_neg], dim=1)
else:
student_scores = torch.cat([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Create probabilities for teacher scores
teacher_scores = torch.cat([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import SentenceTransformer, util
class DistillKLDivLoss(nn.Module):
    """Distillation loss that aligns a student's query-passage score distribution
    with precomputed teacher scores via KL divergence (see citation below)."""
def __init__(self, model: SentenceTransformer, similarity_fct=util.pairwise_dot_score) -> None:
super().__init__()
self.model = model
self.similarity_fct = similarity_fct
self.loss_fct = nn.KLDivLoss(reduction="none")
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings, labels)
def compute_loss_from_embeddings(self, embeddings: list[Tensor], labels: Tensor) -> Tensor:
embeddings_query = embeddings[0]
embeddings_pos = embeddings[1]
embeddings_neg = embeddings[2]
# Compute student scores
student_scores_pos = self.similarity_fct(embeddings_query, embeddings_pos)
student_scores_neg = self.similarity_fct(embeddings_query, embeddings_neg)
# Pack into one tensor and apply log_softmax
student_scores = torch.stack([student_scores_pos, student_scores_neg], dim=1)
student_log_probs = torch.log_softmax(student_scores, dim=1)
# Labels contain teacher similarity scores (already computed before training)
# We expect labels to contain the teacher_pos_score and teacher_neg_score
teacher_pos_scores = labels[:, 0]
teacher_neg_scores = labels[:, 1]
teacher_scores = torch.stack([teacher_pos_scores, teacher_neg_scores], dim=1)
teacher_probs = torch.softmax(teacher_scores, dim=1)
# KL Divergence
loss = self.loss_fct(student_log_probs, teacher_probs).sum(dim=1).mean()
return loss
@property
def citation(self) -> str:
return """
@misc{lin2020distillingdenserepresentationsranking,
title={Distilling Dense Representations for Ranking using Tightly-Coupled Teachers},
author={Sheng-Chieh Lin and Jheng-Hong Yang and Jimmy Lin},
year={2020},
eprint={2010.11386},
archivePrefix={arXiv},
primaryClass={cs.IR},
url={https://arxiv.org/abs/2010.11386},
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.datasets.builder import build_dataset
from mmdet.models.utils import mask2ndarray
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='Directory to save visualizations when no display interface is available')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
        help='display interval of the visualization, in seconds')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'Nested list/tuple values are also allowed, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = build_dataset(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)  # np.bool was removed in NumPy 1.24
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.datasets.builder import build_dataset
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='Directory to save visualizations when no display interface is available')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
        help='display interval of the visualization, in seconds')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or key=a,b. '
        'Nested list/tuple values are also allowed, e.g. key="[(a,b),(c,d)]". '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = build_dataset(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_sample'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_sample'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
            gt_instances.masks = masks.astype(bool)  # np.bool was removed in NumPy 1.24
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
"""Create a key-value store for any langchain serializable object."""
from typing import Callable, Optional
from langchain_core.documents import Document
from langchain_core.load import Serializable, dumps, loads
from langchain_core.stores import BaseStore, ByteStore
from langchain.storage.encoder_backed import EncoderBackedStore
def _dump_as_bytes(obj: Serializable) -> bytes:
"""Return a bytes representation of a document."""
return dumps(obj).encode("utf-8")
def _dump_document_as_bytes(obj: Document) -> bytes:
"""Return a bytes representation of a document."""
if not isinstance(obj, Document):
msg = "Expected a Document instance"
raise TypeError(msg)
return dumps(obj).encode("utf-8")
def _load_document_from_bytes(serialized: bytes) -> Document:
"""Return a document from a bytes representation."""
obj = loads(serialized.decode("utf-8"))
if not isinstance(obj, Document):
msg = f"Expected a Document instance. Got {type(obj)}"
raise TypeError(msg)
return obj
def _load_from_bytes(serialized: bytes) -> Serializable:
"""Return a document from a bytes representation."""
return loads(serialized.decode("utf-8"))
def _identity(x: str) -> str:
"""Return the same object."""
return x
# PUBLIC API
def create_lc_store(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Serializable]:
"""Create a store for langchain serializable objects from a bytes store.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_as_bytes,
_load_from_bytes,
)
def create_kv_docstore(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Document]:
"""Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_document_as_bytes,
_load_document_from_bytes,
)
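# Editor's note: a minimal round-trip sketch (not part of the original module),
# assuming langchain_core's InMemoryByteStore as the underlying bytes store.
#
# from langchain_core.stores import InMemoryByteStore
# docstore = create_kv_docstore(InMemoryByteStore())
# docstore.mset([("doc-1", Document(page_content="hello"))])
# [restored] = docstore.mget(["doc-1"])
# assert restored.page_content == "hello"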
|
"""Create a key-value store for any langchain serializable object."""
from typing import Callable, Optional
from langchain_core.documents import Document
from langchain_core.load import Serializable, dumps, loads
from langchain_core.stores import BaseStore, ByteStore
from langchain.storage.encoder_backed import EncoderBackedStore
def _dump_as_bytes(obj: Serializable) -> bytes:
"""Return a bytes representation of a document."""
return dumps(obj).encode("utf-8")
def _dump_document_as_bytes(obj: Document) -> bytes:
"""Return a bytes representation of a document."""
if not isinstance(obj, Document):
raise TypeError("Expected a Document instance")
return dumps(obj).encode("utf-8")
def _load_document_from_bytes(serialized: bytes) -> Document:
"""Return a document from a bytes representation."""
obj = loads(serialized.decode("utf-8"))
if not isinstance(obj, Document):
raise TypeError(f"Expected a Document instance. Got {type(obj)}")
return obj
def _load_from_bytes(serialized: bytes) -> Serializable:
"""Return a document from a bytes representation."""
return loads(serialized.decode("utf-8"))
def _identity(x: str) -> str:
"""Return the same object."""
return x
# PUBLIC API
def create_lc_store(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Serializable]:
"""Create a store for langchain serializable objects from a bytes store.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_as_bytes,
_load_from_bytes,
)
def create_kv_docstore(
store: ByteStore,
*,
key_encoder: Optional[Callable[[str], str]] = None,
) -> BaseStore[str, Document]:
"""Create a store for langchain Document objects from a bytes store.
This store does run time type checking to ensure that the values are
Document objects.
Args:
store: A bytes store to use as the underlying store.
key_encoder: A function to encode keys; if None uses identity function.
Returns:
A key-value store for documents.
"""
return EncoderBackedStore(
store,
key_encoder or _identity,
_dump_document_as_bytes,
_load_document_from_bytes,
)
|
from typing import Any, Dict
import torch
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
    When ``num_samples`` is larger than the size of the temporal dimension of the video, it
    will sample frames based on nearest-neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.uniform_temporal_subsample, inpt, self.num_samples)
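# Editor's note: a small usage sketch, not part of the original file.
#
# video = torch.rand(16, 3, 8, 8)  # [T, C, H, W]
# out = UniformTemporalSubsample(num_samples=4)(video)
# print(out.shape)                 # torch.Size([4, 3, 8, 8])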
|
from typing import Any, Dict
import torch
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
    When ``num_samples`` is larger than the size of the temporal dimension of the video, it
    will sample frames based on nearest-neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return self._call_kernel(F.uniform_temporal_subsample, inpt, self.num_samples)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.config import Config
from mmdet.data_elements import DetDataSample
from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead
class TestMaskFormerFusionHead(unittest.TestCase):
def test_loss(self):
head = MaskFormerFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
        self.assertFalse(head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
mask_cls_results = torch.rand((2, 10, 5))
mask_pred_results = torch.rand((2, 10, 32, 32))
batch_data_samples = [
DetDataSample(
metainfo={
'batch_input_shape': (32, 32),
'img_shape': (32, 30),
'ori_shape': (30, 30)
}),
DetDataSample(
metainfo={
'batch_input_shape': (32, 32),
'img_shape': (32, 30),
'ori_shape': (29, 30)
})
]
# get panoptic and instance segmentation results
test_cfg = Config(
dict(
panoptic_on=True,
semantic_on=False,
instance_on=True,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=False)
for i in range(len(results)):
self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
batch_data_samples[i].img_shape)
self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
batch_data_samples[i].img_shape)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
for i in range(len(results)):
self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
batch_data_samples[i].ori_shape)
self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
batch_data_samples[i].ori_shape)
# get empty results
test_cfg = Config(
dict(
panoptic_on=False,
semantic_on=False,
instance_on=False,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
for i in range(len(results)):
self.assertEqual(results[i], dict())
# semantic segmentation is not supported
test_cfg = Config(
dict(
panoptic_on=False,
semantic_on=True,
instance_on=False,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
with self.assertRaises(AssertionError):
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.config import Config
from mmdet.core.data_structures import DetDataSample
from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead
class TestMaskFormerFusionHead(unittest.TestCase):
def test_loss(self):
head = MaskFormerFusionHead(num_things_classes=2, num_stuff_classes=2)
result = head.loss()
        self.assertFalse(head.with_loss)
self.assertDictEqual(result, dict())
def test_predict(self):
mask_cls_results = torch.rand((2, 10, 5))
mask_pred_results = torch.rand((2, 10, 32, 32))
batch_data_samples = [
DetDataSample(
metainfo={
'batch_input_shape': (32, 32),
'img_shape': (32, 30),
'ori_shape': (30, 30)
}),
DetDataSample(
metainfo={
'batch_input_shape': (32, 32),
'img_shape': (32, 30),
'ori_shape': (29, 30)
})
]
# get panoptic and instance segmentation results
test_cfg = Config(
dict(
panoptic_on=True,
semantic_on=False,
instance_on=True,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=False)
for i in range(len(results)):
self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
batch_data_samples[i].img_shape)
self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
batch_data_samples[i].img_shape)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
for i in range(len(results)):
self.assertEqual(results[i]['pan_results'].sem_seg.shape[-2:],
batch_data_samples[i].ori_shape)
self.assertEqual(results[i]['ins_results'].masks.shape[-2:],
batch_data_samples[i].ori_shape)
# get empty results
test_cfg = Config(
dict(
panoptic_on=False,
semantic_on=False,
instance_on=False,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
for i in range(len(results)):
self.assertEqual(results[i], dict())
# semantic segmentation is not supported
test_cfg = Config(
dict(
panoptic_on=False,
semantic_on=True,
instance_on=False,
max_per_image=10,
object_mask_thr=0.3,
iou_thr=0.3,
filter_low_score=False))
head = MaskFormerFusionHead(
num_things_classes=2, num_stuff_classes=2, test_cfg=test_cfg)
with self.assertRaises(AssertionError):
results = head.predict(
mask_cls_results,
mask_pred_results,
batch_data_samples,
rescale=True)
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert len(books) == 3
|
import os
from typing import Optional
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from tests import TOYDATA_DIR
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
image2: ImageDoc
return MyDocNested
def test_to_from_csv(tmpdir, nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
image2=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc(), image2=ImageDoc()),
]
)
tmp_file = str(tmpdir / 'tmp.csv')
da.to_csv(tmp_file)
assert os.path.isfile(tmp_file)
da_from = DocList[nested_doc_cls].from_csv(tmp_file)
for doc1, doc2 in zip(da, da_from):
assert doc1 == doc2
def test_from_csv_nested(nested_doc_cls):
da = DocList[nested_doc_cls].from_csv(
file_path=str(TOYDATA_DIR / 'docs_nested.csv')
)
assert len(da) == 3
for i, doc in enumerate(da):
assert doc.count.__class__ == int
assert doc.count == int(f'{i}{i}{i}')
assert doc.text.__class__ == str
assert doc.text == f'hello {i}'
assert doc.image.__class__ == ImageDoc
assert doc.image.tensor is None
assert doc.image.embedding is None
assert doc.image.bytes_ is None
assert doc.image2.__class__ == ImageDoc
assert doc.image2.tensor is None
assert doc.image2.embedding is None
assert doc.image2.bytes_ is None
assert da[0].image2.url == 'image_10.png'
assert da[1].image2.url is None
assert da[2].image2.url is None
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_csv_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
DocList.from_csv(file_path=str(TOYDATA_DIR / 'docs_nested.csv'))
def test_from_csv_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
DocList[nested_doc.__class__].from_csv(file_path=str(TOYDATA_DIR / 'docs.csv'))
def test_from_remote_csv_file():
remote_url = 'https://github.com/docarray/docarray/blob/feat-csv-from-remote-file/tests/toydata/books.csv?raw=true'
class Book(BaseDoc):
title: str
author: str
year: int
books = DocList[Book].from_csv(file_path=remote_url)
assert len(books) == 3
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained on these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# A regular torch DataLoader; as the loss we use losses.ContrastiveTensionLossInBatchNegatives
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
    use_amp=False,  # Set to True if your GPU supports FP16 cores
)
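# Quick sanity check after training (a minimal sketch, not part of the
# original script; the example sentences below are hypothetical).
embeddings = model.encode(["A first test sentence.", "A second one."])
logging.info("Embedding shape: {}".format(embeddings.shape))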
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer
import logging
from datetime import datetime
import gzip
import sys
import tqdm
from torch.utils.data import DataLoader
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
## Training parameters
model_name = 'distilbert-base-uncased'
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ''
if len(sys.argv) >= 3:
output_name = "-"+sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = 'output/train_ct-improved{}-{}'.format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn:
for line in tqdm.tqdm(fIn, desc='Read file'):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# A regular torch DataLoader; as the loss we use losses.ContrastiveTensionLossInBatchNegatives
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={'lr': 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
          use_amp=False  # Set to True if your GPU supports FP16 cores
)
|
from typing import Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
# doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
```
---
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
) -> T:
tensor = super()._docarray_validate(value=value)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoNdArray')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_ndarray')
class VideoNdArray(NdArray, VideoTensorMixin):
"""
Subclass of [`NdArray`][docarray.typing.NdArray], to represent a video tensor.
Adds video-specific features to the tensor.
---
```python
from typing import Optional
import numpy as np
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.typing import VideoNdArray, VideoUrl
class MyVideoDoc(BaseDoc):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoNdArray]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=np.random.random((100, 224, 224, 3)),
)
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
doc_2.video_tensor = parse_obj_as(VideoNdArray, doc_2.url.load().video)
# doc_2.video_tensor.save(file_path='/tmp/file_2.mp4')
```
---
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_owlvit import *
from .feature_extraction_owlvit import *
from .image_processing_owlvit import *
from .image_processing_owlvit_fast import *
from .modeling_owlvit import *
from .processing_owlvit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_owlvit import *
from .feature_extraction_owlvit import *
from .image_processing_owlvit import *
from .modeling_owlvit import *
from .processing_owlvit import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_dataframe(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_dataframe(df=df)
def test_doc_list_error():
class Book(BaseDoc):
title: str
docs = DocList([Book(title='hello'), Book(title='world')])
with pytest.raises(TypeError):
docs.to_dataframe()
|
from typing import Optional
import pandas as pd
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
@pytest.fixture()
def nested_doc_cls():
class MyDoc(BaseDoc):
count: Optional[int]
text: str
class MyDocNested(MyDoc):
image: ImageDoc
return MyDocNested
def test_to_from_pandas_df(nested_doc_cls):
da = DocList[nested_doc_cls](
[
nested_doc_cls(
count=0,
text='hello',
image=ImageDoc(url='aux.png'),
),
nested_doc_cls(text='hello world', image=ImageDoc()),
]
)
df = da.to_dataframe()
assert isinstance(df, pd.DataFrame)
assert len(df) == 2
assert (
df.columns
== [
'id',
'count',
'text',
'image__id',
'image__url',
'image__tensor',
'image__embedding',
'image__bytes_',
]
).all()
da_from_df = DocList[nested_doc_cls].from_dataframe(df)
for doc1, doc2 in zip(da, da_from_df):
assert doc1 == doc2
@pytest.fixture()
def nested_doc():
class Inner(BaseDoc):
img: Optional[ImageDoc]
class Middle(BaseDoc):
img: Optional[ImageDoc]
inner: Optional[Inner]
class Outer(BaseDoc):
img: Optional[ImageDoc]
middle: Optional[Middle]
doc = Outer(
img=ImageDoc(), middle=Middle(img=ImageDoc(), inner=Inner(img=ImageDoc()))
)
return doc
def test_from_pandas_without_schema_raise_exception():
with pytest.raises(TypeError, match='no document schema defined'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList.from_dataframe(df=df)
def test_from_pandas_with_wrong_schema_raise_exception(nested_doc):
with pytest.raises(ValueError, match='Column names do not match the schema'):
df = pd.DataFrame(
columns=['title', 'count'], data=[['title 0', 0], ['title 1', 1]]
)
DocList[nested_doc.__class__].from_dataframe(df=df)
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
def test_barkspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives very small (~e-16) differences.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
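# Standalone illustration (a sketch, separate from the test class above) of
# the double-precision requirement noted in `assert_grad`: gradcheck compares
# analytical gradients against finite differences, which is only reliable at
# float64 with the default eps and tolerances.
def _gradcheck_precision_demo():
    x = torch.rand(5, dtype=torch.double, requires_grad=True)
    assert gradcheck(torch.sin, (x,))  # float64 input -> check passes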
|
from typing import List
import torch
import torchaudio.prototype.transforms as T
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import get_spectrogram, get_whitenoise, nested_params, TestBaseMixin
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.cdouble if i.is_complex() else torch.double, device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (4, 3, 2)
L_x, L_y = 23, 40
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(dtype=self.dtype, device=self.device)
self.assert_grad(convolve, [x, y])
def test_barkspectrogram(self):
        # replication_pad1d_backward_cuda is not deterministic and
        # gives very small (~e-16) differences.
sample_rate = 8000
transform = T.BarkSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_barkscale(self):
sample_rate = 8000
n_fft = 400
n_barks = n_fft // 2 + 1
transform = T.BarkScale(sample_rate=sample_rate, n_barks=n_barks)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2), n_fft=n_fft, power=1
)
self.assert_grad(transform, [spec])
def test_Speed(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.Speed(1000, 1.1).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
def test_SpeedPerturbation(self):
leading_dims = (3, 2)
time = 200
waveform = torch.rand(*leading_dims, time, dtype=torch.float64, device=self.device, requires_grad=True)
lengths = torch.randint(1, time, leading_dims, dtype=torch.float64, device=self.device)
speed = T.SpeedPerturbation(1000, [0.9]).to(device=self.device, dtype=torch.float64)
assert gradcheck(speed, (waveform, lengths))
assert gradgradcheck(speed, (waveform, lengths))
@nested_params([True, False])
def test_AddNoise(self, use_lengths):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=torch.float64, device=self.device, requires_grad=True)
if use_lengths:
lengths = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True)
else:
lengths = None
snr = torch.rand(*leading_dims, dtype=torch.float64, device=self.device, requires_grad=True) * 10
add_noise = T.AddNoise().to(self.device, torch.float64)
assert gradcheck(add_noise, (waveform, noise, snr, lengths))
assert gradgradcheck(add_noise, (waveform, noise, snr, lengths))
def test_Preemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
preemphasis = T.Preemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(preemphasis, (waveform,))
assert gradgradcheck(preemphasis, (waveform,))
def test_Deemphasis(self):
waveform = torch.rand(3, 4, 10, dtype=torch.float64, device=self.device, requires_grad=True)
deemphasis = T.Deemphasis(coeff=0.97).to(dtype=torch.float64, device=self.device)
assert gradcheck(deemphasis, (waveform,))
assert gradgradcheck(deemphasis, (waveform,))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_occluded_metric import CocoOccludedSeparatedMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_det_results import DumpDetResults
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals',
'CocoOccludedSeparatedMetric', 'DumpDetResults'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
from .crowdhuman_metric import CrowdHumanMetric
from .dump_proposals_metric import DumpProposals
from .lvis_metric import LVISMetric
from .openimages_metric import OpenImagesMetric
from .voc_metric import VOCMetric
__all__ = [
'CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric', 'OpenImagesMetric',
'VOCMetric', 'LVISMetric', 'CrowdHumanMetric', 'DumpProposals'
]
|
import re
from typing import Any
from langchain.evaluation.schema import StringEvaluator
class RegexMatchStringEvaluator(StringEvaluator):
"""Compute a regex match between the prediction and the reference.
Examples
----------
>>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^mindy.*cto$",
) # This will return {'score': 1.0} due to the IGNORECASE flag
>>> evaluator = RegexMatchStringEvaluator()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$",
) # This will return {'score': 0.0}
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$|^Mindy.*CTO$",
) # This will return {'score': 1.0} as the prediction matches the second pattern in the union
""" # noqa: E501
def __init__(self, *, flags: int = 0, **kwargs: Any): # Default is no flags
super().__init__()
self.flags = flags
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> list[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "regex_match"
def _evaluate_strings( # type: ignore[override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {"score": int(bool(match))}
|
import re
from typing import Any
from langchain.evaluation.schema import StringEvaluator
class RegexMatchStringEvaluator(StringEvaluator):
"""Compute a regex match between the prediction and the reference.
Examples
----------
>>> evaluator = RegexMatchStringEvaluator(flags=re.IGNORECASE)
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^mindy.*cto$",
) # This will return {'score': 1.0} due to the IGNORECASE flag
>>> evaluator = RegexMatchStringEvaluator()
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$",
) # This will return {'score': 0.0}
>>> evaluator.evaluate_strings(
prediction="Mindy is the CTO",
reference="^Mike.*CEO$|^Mindy.*CTO$",
) # This will return {'score': 1.0} as the prediction matches the second pattern in the union
""" # noqa: E501
def __init__(self, *, flags: int = 0, **kwargs: Any): # Default is no flags
super().__init__()
self.flags = flags
@property
def requires_input(self) -> bool:
"""
This evaluator does not require input.
"""
return False
@property
def requires_reference(self) -> bool:
"""
This evaluator requires a reference.
"""
return True
@property
def input_keys(self) -> list[str]:
"""
Get the input keys.
Returns:
List[str]: The input keys.
"""
return ["reference", "prediction"]
@property
def evaluation_name(self) -> str:
"""
Get the evaluation name.
Returns:
str: The evaluation name.
"""
return "regex_match"
def _evaluate_strings( # type: ignore[arg-type,override]
self,
*,
prediction: str,
reference: str,
**kwargs: Any,
) -> dict:
"""
Evaluate the regex match between the prediction and the reference.
Args:
prediction (str): The prediction string.
reference (Optional[str], optional): The reference regex pattern.
Returns:
dict: The evaluation results containing the score.
"""
match = re.match(reference, prediction, flags=self.flags)
return {"score": int(bool(match))}
|
import shutil
import pytest
import os
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document
from ...big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]),
parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
import shutil
import pytest
import os
import numpy as np
import PIL.Image as Image
from jina import DocumentArray, Document
from jinahub.image.encoder.big_transfer import BigTransferEncoder
directory = os.path.dirname(os.path.realpath(__file__))
def test_initialization_and_model_download():
shutil.rmtree('pretrained', ignore_errors=True)
# This call will download the model
encoder = BigTransferEncoder()
assert encoder.model_path == 'pretrained'
assert encoder.model_name == 'R50x1'
assert not encoder.on_gpu
assert os.path.exists('pretrained')
assert os.path.exists(os.path.join('pretrained', 'saved_model.pb'))
# This call will use the downloaded model
_ = BigTransferEncoder()
shutil.rmtree('pretrained', ignore_errors=True)
with pytest.raises(AttributeError):
_ = BigTransferEncoder(model_name='model_not_exists')
def test_encoding():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder()
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_preprocessing():
doc = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
doc.convert_image_uri_to_blob()
img = Image.fromarray(doc.blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.blob = img
assert doc.embedding is None
encoder = BigTransferEncoder(target_dim=(256, 256, 3))
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding.shape == (2048,)
def test_encoding_default_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder(default_traversal_paths=['c'])
encoder.encode(DocumentArray([doc]), {})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
def test_encoding_override_chunks():
doc = Document(text="testing")
chunk = Document(uri=os.path.join(directory, '../test_data/test_image.png'))
for i in range(3):
doc.chunks.append(chunk)
doc.chunks[i].convert_image_uri_to_blob()
img = Image.fromarray(doc.chunks[i].blob.astype('uint8'))
img = img.resize((96, 96))
img = np.array(img).astype('float32') / 255
doc.chunks[i].blob = img
encoder = BigTransferEncoder()
assert encoder.default_traversal_paths == ['r']
encoder.encode(DocumentArray([doc]),
parameters={'traversal_paths': ['c']})
assert doc.embedding is None
for i in range(3):
assert doc.chunks[i].embedding.shape == (2048,)
|
"""scrapegraph tool specification module for web scraping operations."""
from typing import Dict, List, Optional
from pydantic import BaseModel
from scrapegraph_py import Client
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ScrapegraphToolSpec(BaseToolSpec):
"""scrapegraph tool specification for web scraping operations."""
spec_functions = [
"scrapegraph_smartscraper",
"scrapegraph_markdownify",
"scrapegraph_search",
]
def scrapegraph_smartscraper(
self,
prompt: str,
url: str,
api_key: str,
schema: Optional[List[BaseModel]] = None,
) -> List[Dict]:
"""Perform synchronous web scraping using scrapegraph.
Args:
prompt (str): User prompt describing the scraping task
url (str): Target website URL to scrape
api_key (str): scrapegraph API key
schema (Optional[List[BaseModel]]): Pydantic models defining the output structure
Returns:
List[Dict]: Scraped data matching the provided schema
"""
client = Client(api_key=api_key)
# Basic usage
return client.smartscraper(
website_url=url, user_prompt=prompt, output_schema=schema
)
def scrapegraph_markdownify(self, url: str, api_key: str) -> str:
"""Convert webpage content to markdown format using scrapegraph.
Args:
url (str): Target website URL to convert
api_key (str): scrapegraph API key
Returns:
str: Markdown representation of the webpage content
"""
client = Client(api_key=api_key)
return client.markdownify(website_url=url)
def scrapegraph_search(self, query: str, api_key: str) -> str:
"""Perform a search query using scrapegraph.
Args:
query (str): Search query to execute
api_key (str): scrapegraph API key
Returns:
str: Search results from scrapegraph
"""
client = Client(api_key=api_key)
return client.search(query=query)
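# Usage sketch (hypothetical values; `to_tool_list` is inherited from
# BaseToolSpec and exposes the functions in `spec_functions` as agent tools):
# tool_spec = ScrapegraphToolSpec()
# tools = tool_spec.to_tool_list()
# markdown = tool_spec.scrapegraph_markdownify(
#     url="https://example.com", api_key="sgai-your-key"
# )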
|
"""scrapegraph tool specification module for web scraping operations."""
from typing import Dict, List, Optional
from pydantic import BaseModel
from scrapegraph_py import Client
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class ScrapegraphToolSpec(BaseToolSpec):
"""scrapegraph tool specification for web scraping operations."""
spec_functions = [
"scrapegraph_smartscraper",
"scrapegraph_markdownify",
"scrapegraph_local_scrape",
]
def scrapegraph_smartscraper(
self,
prompt: str,
url: str,
api_key: str,
schema: Optional[List[BaseModel]] = None,
) -> List[Dict]:
"""Perform synchronous web scraping using scrapegraph.
Args:
prompt (str): User prompt describing the scraping task
url (str): Target website URL to scrape
api_key (str): scrapegraph API key
schema (Optional[List[BaseModel]]): Pydantic models defining the output structure
Returns:
List[Dict]: Scraped data matching the provided schema
"""
client = Client(api_key=api_key)
# Basic usage
return client.smartscraper(
website_url=url, user_prompt=prompt, output_schema=schema
)
def scrapegraph_markdownify(self, url: str, api_key: str) -> str:
"""Convert webpage content to markdown format using scrapegraph.
Args:
url (str): Target website URL to convert
api_key (str): scrapegraph API key
Returns:
str: Markdown representation of the webpage content
"""
client = Client(api_key=api_key)
return client.markdownify(website_url=url)
def scrapegraph_local_scrape(self, text: str, api_key: str) -> str:
"""Extract structured data from raw text using scrapegraph.
Args:
text (str): Raw text to process and extract data from
api_key (str): scrapegraph API key
Returns:
str: Structured data extracted from the input text
"""
client = Client(api_key=api_key)
return client.local_scrape(text=text)
|
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.bytes.image_bytes import ImageBytes
from docarray.typing.bytes.video_bytes import VideoBytes
__all__ = ['ImageBytes', 'VideoBytes', 'AudioBytes']
|
from docarray.typing.bytes.image_bytes import ImageBytes
__all__ = ['ImageBytes']
|
from llama_index_instrumentation.events.span import SpanDropEvent # noqa
|
from llama_index.core.instrumentation.events.base import BaseEvent
class SpanDropEvent(BaseEvent):
"""
SpanDropEvent.
Args:
err_str (str): Error string.
"""
err_str: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SpanDropEvent"
|
from .bifpn import BiFPN
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .huber_loss import HuberLoss
from .tensorflow.anchor_generator import YXYXAnchorGenerator
from .tensorflow.coco_90class import Coco90Dataset
from .tensorflow.coco_90metric import Coco90Metric
from .tensorflow.trans_max_iou_assigner import TransMaxIoUAssigner
from .tensorflow.yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
from .utils import Conv2dSamePadding
__all__ = [
'EfficientDet', 'BiFPN', 'HuberLoss', 'EfficientDetSepBNHead',
'Conv2dSamePadding', 'Coco90Dataset', 'Coco90Metric',
'YXYXAnchorGenerator', 'TransMaxIoUAssigner', 'YXYXDeltaXYWHBBoxCoder'
]
|
from .anchor_generator import YXYXAnchorGenerator
from .bifpn import BiFPN
from .coco_90class import Coco90Dataset
from .coco_90metric import Coco90Metric
from .efficientdet import EfficientDet
from .efficientdet_head import EfficientDetSepBNHead
from .trans_max_iou_assigner import TransMaxIoUAssigner
from .yxyx_bbox_coder import YXYXDeltaXYWHBBoxCoder
__all__ = [
'EfficientDet', 'BiFPN', 'EfficientDetSepBNHead', 'YXYXAnchorGenerator',
'YXYXDeltaXYWHBBoxCoder', 'Coco90Dataset', 'Coco90Metric',
'TransMaxIoUAssigner'
]
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulled the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: io",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"tutorial",
"recipe",
"example",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Tuple[Optional[str], Optional[str]]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py).
Some guidance:
Use 'module: ops' for operations under 'torchaudio/{transforms, functional}', \
and ML-related components under 'torchaudio/csrc' (e.g. RNN-T loss).
Things in "examples" directory:
'recipe' is applicable to training recipes under the 'examples' folder,
'tutorial' is applicable to tutorials under the “examples/tutorials” folder
'example' is applicable to everything else (e.g. C++ examples)
'module: docs' is applicable to code documentations (not to tutorials). \
Regarding examples in code documentations, please also use 'module: docs'.
Please use 'other' tag only when you’re sure the changes are not much relevant to users, \
or when all other tags are not applicable. Try not to use it often, in order to minimize \
efforts required when we prepare release notes.
When preparing release notes, please make sure 'documentation' and 'tutorials' occur as the \
last sub-categories under each primary category like 'new feature', 'improvements' or 'prototype'.
Things related to build are by default excluded from the release note, except when it impacts users.
For example:
* Drop support of Python 3.7.
* Add support of Python 3.X.
* Changing the way a third party library is bound (so that user needs to install it separately).
"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
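# Worked example (hypothetical commit message) of the parsing in
# get_pr_merger_and_number: each field is extracted by splitting on a literal
# marker and taking the text up to the next newline.
#   msg = ("Fix resampling\n\n"
#          "Pull Request resolved: https://github.com/pytorch/audio/pull/123\n"
#          "Pulled By: somedev\n")
#   msg.split("Pulled By: ")[1].split("\n")[0]  -> 'somedev'
#   msg.split("Pull Request resolved: "
#             "https://github.com/pytorch/audio/pull/")[1].split("\n")[0]  -> '123'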
|
"""
This script finds the person responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'.
Note: we only ping the person who pulled the PR, not the reviewers, as the reviewers can sometimes be external
to torchaudio with no labeling responsibility, so we don't want to bother them.
"""
import json
import os
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"tutorial",
"recipe",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: I/O",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"build",
"style",
"perf",
"other",
}
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
REQUEST_HEADERS = {"Accept": "application/vnd.github.v3+json", "Authorization": f"token {GITHUB_TOKEN}"}
TORCHAUDIO_REPO = "https://api.github.com/repos/pytorch/audio"
def query_torchaudio(cmd: str) -> Any:
response = requests.get(f"{TORCHAUDIO_REPO}/{cmd}", headers=REQUEST_HEADERS)
return response.json()
def get_pr_merger_and_number(commit_hash: str) -> Tuple[Optional[str], Optional[str]]:
data = query_torchaudio(f"commits/{commit_hash}")
commit_message = data["commit"]["message"]
pulled_by = commit_message.split("Pulled By: ")
pulled_by = pulled_by[1].split("\n")[0] if len(pulled_by) > 1 else None
pr_number = commit_message.split("Pull Request resolved: https://github.com/pytorch/audio/pull/")
pr_number = pr_number[1].split("\n")[0] if len(pr_number) > 1 else None
return pulled_by, pr_number
def get_labels(pr_number: int) -> Set[str]:
data = query_torchaudio(f"pulls/{pr_number}")
labels = {label["name"] for label in data["labels"]}
return labels
def post_github_comment(pr_number: int, merger: str) -> Any:
message = {
"body": f"Hey @{merger}."
+ """
You merged this PR, but labels were not properly added. Please add a primary and secondary label \
(See https://github.com/pytorch/audio/blob/main/.github/process_commit.py)"""
}
response = requests.post(
f"{TORCHAUDIO_REPO}/issues/{pr_number}/comments", json.dumps(message), headers=REQUEST_HEADERS
)
return response.json()
if __name__ == "__main__":
commit_hash = sys.argv[1]
merger, pr_number = get_pr_merger_and_number(commit_hash)
if pr_number:
labels = get_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
post_github_comment(pr_number, merger)
|
"""Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
- Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict) # noqa: T201
assert result_dict == DEF_EXPECTED_RESULT
def test_regex_dict_output_type() -> None:
"""Test regex dict output type."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
assert regex_dict_parser.OutputType == dict[str, str]
|
"""Test in memory docstore."""
from langchain.output_parsers.regex_dict import RegexDictParser
DEF_EXPECTED_RESULT = {"action": "Search", "action_input": "How to use this class?"}
DEF_OUTPUT_KEY_TO_FORMAT = {"action": "Action", "action_input": "Action Input"}
DEF_README = """We have just received a new result from the LLM, and our next step is
to filter and read its format using regular expressions to identify specific fields,
such as:
- Action: Search
- Action Input: How to use this class?
- Additional Fields: "N/A"
To assist us in this task, we use the regex_dict class. This class allows us to send a
dictionary containing an output key and the expected format, which in turn enables us to
retrieve the result of the matching formats and extract specific information from it.
To exclude irrelevant information from our return dictionary, we can instruct the LLM to
use a specific command that notifies us when it doesn't know the answer. We call this
variable the "no_update_value", and for our current case, we set it to "N/A". Therefore,
we expect the result to only contain the following fields:
{
{key = action, value = search}
{key = action_input, value = "How to use this class?"}.
}"""
def test_regex_dict_result() -> None:
"""Test regex dict result."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
result_dict = regex_dict_parser.parse(DEF_README)
print("parse_result:", result_dict) # noqa: T201
assert DEF_EXPECTED_RESULT == result_dict
def test_regex_dict_output_type() -> None:
"""Test regex dict output type."""
regex_dict_parser = RegexDictParser(
output_key_to_format=DEF_OUTPUT_KEY_TO_FORMAT, no_update_value="N/A"
)
assert regex_dict_parser.OutputType == dict[str, str]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import * # noqa: F401,F403
from .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS,
BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS,
PRIOR_GENERATORS, build_anchor_generator, build_assigner,
build_bbox_coder, build_iou_calculator, build_match_cost,
build_prior_generator, build_sampler)
from .coders import * # noqa: F401,F403
from .prior_generators import * # noqa: F401,F403
from .samplers import * # noqa: F401,F403
from .tracking import * # noqa: F401,F403
__all__ = [
'ANCHOR_GENERATORS', 'PRIOR_GENERATORS', 'BBOX_ASSIGNERS', 'BBOX_SAMPLERS',
'MATCH_COSTS', 'BBOX_CODERS', 'IOU_CALCULATORS', 'build_anchor_generator',
'build_prior_generator', 'build_assigner', 'build_sampler',
'build_iou_calculator', 'build_match_cost', 'build_bbox_coder'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import * # noqa: F401,F403
from .builder import (ANCHOR_GENERATORS, BBOX_ASSIGNERS, BBOX_CODERS,
BBOX_SAMPLERS, IOU_CALCULATORS, MATCH_COSTS,
PRIOR_GENERATORS, build_anchor_generator, build_assigner,
build_bbox_coder, build_iou_calculator, build_match_cost,
build_prior_generator, build_sampler)
from .coders import * # noqa: F401,F403
from .prior_generators import * # noqa: F401,F403
from .samplers import * # noqa: F401,F403
__all__ = [
'ANCHOR_GENERATORS', 'PRIOR_GENERATORS', 'BBOX_ASSIGNERS', 'BBOX_SAMPLERS',
'MATCH_COSTS', 'BBOX_CODERS', 'IOU_CALCULATORS', 'build_anchor_generator',
'build_prior_generator', 'build_assigner', 'build_sampler',
'build_iou_calculator', 'build_match_cost', 'build_bbox_coder'
]
|
import os
import numpy as np
import pytest
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.save(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
@pytest.mark.requires_trainable_backend
def test_scalar_weight(self):
model = keras.Sequential(name="my_sequential")
model.add(keras.Input(shape=(1,), name="my_input"))
model.add(keras.layers.Dense(1, activation="sigmoid", name="my_dense"))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.array([[1]]), np.array([[1]]), verbose=0)
model_fpath = os.path.join(self.get_temp_dir(), "model.keras")
weights_fpath = os.path.join(self.get_temp_dir(), "model.weights.h5")
model.save(model_fpath)
model.save_weights(weights_fpath)
model_editor = KerasFileEditor(model_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_editor.weights_dict)), 8
)
model_weights_editor = KerasFileEditor(weights_fpath)
self.assertEqual(
len(keras.src.tree.flatten(model_weights_editor.weights_dict)), 8
)
|
import os
import numpy as np
import pytest
import keras
from keras.src import testing
from keras.src.saving.file_editor import KerasFileEditor
def get_source_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
def get_target_model():
inputs = keras.Input((2,))
x = keras.layers.Dense(3, name="mydense")(inputs)
x = keras.layers.Dense(3, name="myotherdense")(x)
outputs = keras.layers.Dense(3, name="output_layer")(x)
model = keras.Model(inputs, outputs)
return model
class SavingTest(testing.TestCase):
def test_basics(self):
temp_filepath = os.path.join(self.get_temp_dir(), "my_model.keras")
model = get_source_model()
model.save(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
target_model = get_target_model()
out = editor.compare(model) # Succeeds
self.assertEqual(out["status"], "success")
out = editor.compare(target_model) # Fails
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 2)
editor.rename_object("dense_3", "dense_4")
editor.rename_object("layers/dense_4", "dense_2")
editor.add_weights("dense_2", weights={"1": np.random.random((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.add_object(
"layers/dense_3", weights={"0": np.random.random((3, 3))}
)
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_object("layers/dense_3")
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.summary()
temp_filepath = os.path.join(self.get_temp_dir(), "resaved.weights.h5")
editor.save(temp_filepath)
target_model.load_weights(temp_filepath)
editor = KerasFileEditor(temp_filepath)
editor.summary()
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
editor.delete_weight("dense_2", "1")
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.add_weights("dense_2", {"1": np.zeros((7,))})
out = editor.compare(target_model) # Fails
self.assertEqual(out["status"], "error")
self.assertEqual(out["error_count"], 1)
editor.delete_weight("dense_2", "1")
editor.add_weights("dense_2", {"1": np.zeros((3,))})
out = editor.compare(target_model) # Succeeds
self.assertEqual(out["status"], "success")
@pytest.mark.requires_trainable_backend
def test_scalar_weight(self):
model = keras.Sequential(name="my_sequential")
model.add(keras.Input(shape=(1,), name="my_input"))
model.add(keras.layers.Dense(1, activation="sigmoid", name="my_dense"))
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.fit(np.array([[1]]), np.array([[1]]), verbose=0)
model.save("model.keras")
model.save_weights("model.weights.h5")
model_editor = KerasFileEditor("model.keras")
self.assertEqual(
len(keras.src.tree.flatten(model_editor.weights_dict)), 8
)
model_weights_editor = KerasFileEditor("model.weights.h5")
self.assertEqual(
len(keras.src.tree.flatten(model_weights_editor.weights_dict)), 8
)
|
from llama_index.core.llms import LLM
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
def test_class():
names_of_base_classes = [b.__name__ for b in OllamaMultiModal.__mro__]
assert LLM.__name__ in names_of_base_classes
|
from llama_index.core.multi_modal_llms.base import MultiModalLLM
from llama_index.multi_modal_llms.ollama import OllamaMultiModal
def test_class():
names_of_base_classes = [b.__name__ for b in OllamaMultiModal.__mro__]
assert MultiModalLLM.__name__ in names_of_base_classes
|
# Copyright (c) OpenMMLab. All rights reserved.
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper,
DeepSpeedOptimWrapper, DefaultOptimWrapperConstructor,
OptimWrapper, OptimWrapperDict,
ZeroRedundancyOptimizer, build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper', 'DeepSpeedOptimWrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import is_installed
from .optimizer import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
AmpOptimWrapper, ApexOptimWrapper, BaseOptimWrapper,
DefaultOptimWrapperConstructor, OptimWrapper,
OptimWrapperDict, ZeroRedundancyOptimizer,
build_optim_wrapper)
# yapf: disable
from .scheduler import (ConstantLR, ConstantMomentum, ConstantParamScheduler,
CosineAnnealingLR, CosineAnnealingMomentum,
CosineAnnealingParamScheduler, ExponentialLR,
ExponentialMomentum, ExponentialParamScheduler,
LinearLR, LinearMomentum, LinearParamScheduler,
MultiStepLR, MultiStepMomentum,
MultiStepParamScheduler, OneCycleLR,
OneCycleParamScheduler, PolyLR, PolyMomentum,
PolyParamScheduler, ReduceOnPlateauLR,
ReduceOnPlateauMomentum, ReduceOnPlateauParamScheduler,
StepLR, StepMomentum, StepParamScheduler,
_ParamScheduler)
# yapf: enable
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS', 'build_optim_wrapper',
'DefaultOptimWrapperConstructor', 'ConstantLR', 'CosineAnnealingLR',
'ExponentialLR', 'LinearLR', 'MultiStepLR', 'StepLR', 'ConstantMomentum',
'CosineAnnealingMomentum', 'ExponentialMomentum', 'LinearMomentum',
'MultiStepMomentum', 'StepMomentum', 'ConstantParamScheduler',
'CosineAnnealingParamScheduler', 'ExponentialParamScheduler',
'LinearParamScheduler', 'MultiStepParamScheduler', 'StepParamScheduler',
'_ParamScheduler', 'OptimWrapper', 'AmpOptimWrapper', 'ApexOptimWrapper',
'OptimWrapperDict', 'OneCycleParamScheduler', 'OneCycleLR', 'PolyLR',
'PolyMomentum', 'PolyParamScheduler', 'ReduceOnPlateauLR',
'ReduceOnPlateauMomentum', 'ReduceOnPlateauParamScheduler',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
if is_installed('deepspeed'):
from .optimizer import DeepSpeedOptimWrapper # noqa:F401
__all__.append('DeepSpeedOptimWrapper')
|
from typing import Optional
import numpy as np
import torch
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import NdArray, TorchTensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
from typing import Optional
import numpy as np
import torch
from docarray import DocumentArray
from docarray.document import BaseDocument
from docarray.typing import Tensor, TorchTensor
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: Tensor
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: Tensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
import logging
from sentence_transformers import LoggingHandler, SentenceTransformer
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with if __name__ == "__main__". Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = ["This is sentence {}".format(i) for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
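    # Hedged variant: recent sentence-transformers versions also accept
    # `batch_size` and `chunk_size` keyword arguments here (an assumption;
    # check your installed version before relying on them):
    # emb = model.encode_multi_process(sentences, pool, batch_size=64, chunk_size=5000)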
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
"""
This example starts multiple processes (1 per GPU), which encode
sentences in parallel. This gives a near linear speed-up
when encoding large text collections.
"""
from sentence_transformers import SentenceTransformer, LoggingHandler
import logging
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
# Important: you need to guard your code with if __name__ == "__main__". Otherwise, CUDA runs into issues when spawning new processes.
if __name__ == "__main__":
# Create a large list of 100k sentences
sentences = ["This is sentence {}".format(i) for i in range(100000)]
# Define the model
model = SentenceTransformer("all-MiniLM-L6-v2")
# Start the multi-process pool on all available CUDA devices
pool = model.start_multi_process_pool()
# Compute the embeddings using the multi-process pool
emb = model.encode_multi_process(sentences, pool)
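    # Hedged variant: recent sentence-transformers versions also accept
    # `batch_size` and `chunk_size` keyword arguments here (an assumption;
    # check your installed version before relying on them):
    # emb = model.encode_multi_process(sentences, pool, batch_size=64, chunk_size=5000)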
print("Embeddings computed. Shape:", emb.shape)
# Optional: Stop the processes in the pool
model.stop_multi_process_pool(pool)
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
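# Usage sketch (assumes the torchvision prototype datasets API; names are
# illustrative, not part of this file):
# from torchvision.prototype import datasets
# dp = datasets.load("dtd", split="val", fold=2)
# sample = next(iter(dp))  # dict with "image", "label", "joint_categories", "path"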
|
import enum
import pathlib
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import CSVParser, Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "dtd"
class DTDDemux(enum.IntEnum):
SPLIT = 0
JOINT_CATEGORIES = 1
IMAGES = 2
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class DTD(Dataset):
"""DTD Dataset.
homepage="https://www.robots.ox.ac.uk/~vgg/data/dtd/",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
fold: int = 1,
skip_validation_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
if not (1 <= fold <= 10):
raise ValueError(f"The fold parameter should be an integer in [1, 10]. Got {fold}")
self._fold = fold
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_validation_check)
def _resources(self) -> List[OnlineResource]:
archive = HttpResource(
"https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz",
sha256="e42855a52a4950a3b59612834602aa253914755c95b0cff9ead6d07395f8e205",
preprocess="decompress",
)
return [archive]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = pathlib.Path(data[0])
if path.parent.name == "labels":
if path.name == "labels_joint_anno.txt":
return DTDDemux.JOINT_CATEGORIES
return DTDDemux.SPLIT
elif path.parents[1].name == "images":
return DTDDemux.IMAGES
else:
return None
def _image_key_fn(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
# The split files contain hardcoded posix paths for the images, e.g. banded/banded_0001.jpg
return str(path.relative_to(path.parents[1]).as_posix())
def _prepare_sample(self, data: Tuple[Tuple[str, List[str]], Tuple[str, BinaryIO]]) -> Dict[str, Any]:
(_, joint_categories_data), image_data = data
_, *joint_categories = joint_categories_data
path, buffer = image_data
category = pathlib.Path(path).parent.name
return dict(
joint_categories={category for category in joint_categories if category},
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
splits_dp, joint_categories_dp, images_dp = Demultiplexer(
archive_dp, 3, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
splits_dp = Filter(splits_dp, path_comparator("name", f"{self._split}{self._fold}.txt"))
splits_dp = LineReader(splits_dp, decode=True, return_path=False)
splits_dp = hint_shuffling(splits_dp)
splits_dp = hint_sharding(splits_dp)
joint_categories_dp = CSVParser(joint_categories_dp, delimiter=" ")
dp = IterKeyZipper(
splits_dp,
joint_categories_dp,
key_fn=getitem(),
ref_key_fn=getitem(0),
buffer_size=INFINITE_BUFFER_SIZE,
)
dp = IterKeyZipper(
dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=self._image_key_fn,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _filter_images(self, data: Tuple[str, Any]) -> bool:
return self._classify_archive(data) == DTDDemux.IMAGES
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, self._filter_images)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
def __len__(self) -> int:
return 1_880 # All splits have the same length
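# Usage sketch (assumes the torchvision prototype datasets API; names are
# illustrative, not part of this file):
# from torchvision.prototype import datasets
# dp = datasets.load("dtd", split="val", fold=2)
# sample = next(iter(dp))  # dict with "image", "label", "joint_categories", "path"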
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix contains parallel sentences mined from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import gzip
import os
import sentence_transformers.util
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
                        break  # WikiMatrix files are sorted by decreasing score, so we can stop here
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""
This script downloads the WikiMatrix corpus (https://github.com/facebookresearch/LASER/tree/master/tasks/WikiMatrix)
and creates parallel-sentence TSV files that can be used to extend existing sentence embedding models to new languages.
WikiMatrix contains parallel sentences mined from Wikipedia in various languages.
Further information can be found in our paper:
Making Monolingual Sentence Embeddings Multilingual using Knowledge Distillation
https://arxiv.org/abs/2004.09813
"""
import os
import sentence_transformers.util
import gzip
source_languages = set(["en"]) # Languages our (monolingual) teacher model understands
target_languages = set(["de", "es", "it", "fr", "ar", "tr"]) # New languages we want to extend to
num_dev_sentences = 1000 # Number of sentences we want to use for development
threshold = 1.075 # Only use sentences with a LASER similarity score above the threshold
download_url = "https://dl.fbaipublicfiles.com/laser/WikiMatrix/v1/"
download_folder = "../datasets/WikiMatrix/"
parallel_sentences_folder = "parallel-sentences/"
os.makedirs(os.path.dirname(download_folder), exist_ok=True)
os.makedirs(parallel_sentences_folder, exist_ok=True)
for source_lang in source_languages:
for target_lang in target_languages:
filename_train = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-train.tsv.gz".format(source_lang, target_lang)
)
filename_dev = os.path.join(
parallel_sentences_folder, "WikiMatrix-{}-{}-dev.tsv.gz".format(source_lang, target_lang)
)
if not os.path.exists(filename_train) and not os.path.exists(filename_dev):
langs_ordered = sorted([source_lang, target_lang])
wikimatrix_filename = "WikiMatrix.{}-{}.tsv.gz".format(*langs_ordered)
wikimatrix_filepath = os.path.join(download_folder, wikimatrix_filename)
if not os.path.exists(wikimatrix_filepath):
print("Download", download_url + wikimatrix_filename)
try:
sentence_transformers.util.http_get(download_url + wikimatrix_filename, wikimatrix_filepath)
except Exception:
print("Was not able to download", download_url + wikimatrix_filename)
continue
if not os.path.exists(wikimatrix_filepath):
continue
train_sentences = []
dev_sentences = []
dev_sentences_set = set()
extract_dev_sentences = True
with gzip.open(wikimatrix_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
score, sent1, sent2 = line.strip().split("\t")
sent1 = sent1.strip()
sent2 = sent2.strip()
score = float(score)
if score < threshold:
                        break  # WikiMatrix files are sorted by decreasing score, so we can stop here
if sent1 == sent2:
continue
if langs_ordered.index(source_lang) == 1: # Swap, so that src lang is sent1
sent1, sent2 = sent2, sent1
# Avoid duplicates in development set
if sent1 in dev_sentences_set or sent2 in dev_sentences_set:
continue
if extract_dev_sentences:
dev_sentences.append([sent1, sent2])
dev_sentences_set.add(sent1)
dev_sentences_set.add(sent2)
if len(dev_sentences) >= num_dev_sentences:
extract_dev_sentences = False
else:
train_sentences.append([sent1, sent2])
print("Write", len(dev_sentences), "dev sentences", filename_dev)
with gzip.open(filename_dev, "wt", encoding="utf8") as fOut:
for sents in dev_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("Write", len(train_sentences), "train sentences", filename_train)
with gzip.open(filename_train, "wt", encoding="utf8") as fOut:
for sents in train_sentences:
fOut.write("\t".join(sents))
fOut.write("\n")
print("---DONE---")
|
"""langchain-core version information and utilities."""
VERSION = "0.3.56"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.56rc1"
|
_base_ = './yolov3_d53_8xb8-ms-608-273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# dataset settings
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
input_size = (320, 320)
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
    # `mean` and `to_rgb` should be the same as in `preprocess_cfg`
dict(type='Expand', mean=[0, 0, 0], to_rgb=True, ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=input_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
import os
import torch
def average_checkpoints(last):
avg = None
for path in last:
states = torch.load(path, map_location=lambda storage, loc: storage)["state_dict"]
if avg is None:
avg = states
else:
for k in avg.keys():
avg[k] += states[k]
    # average the summed parameters; integer tensors use floor
    # division so they remain integral
for k in avg.keys():
if avg[k] is not None:
if avg[k].is_floating_point():
avg[k] /= len(last)
else:
avg[k] //= len(last)
return avg
def ensemble(args):
last = [os.path.join(args.exp_dir, args.exp_name, f"epoch={n}.ckpt") for n in range(args.epochs - 10, args.epochs)]
model_path = os.path.join(args.exp_dir, args.exp_name, "model_avg_10.pth")
torch.save({"state_dict": average_checkpoints(last)}, model_path)
|
import os
import torch
def average_checkpoints(last):
avg = None
for path in last:
states = torch.load(path, map_location=lambda storage, loc: storage)["state_dict"]
if avg is None:
avg = states
else:
for k in avg.keys():
avg[k] += states[k]
    # average the summed parameters; integer tensors use floor
    # division so they remain integral
for k in avg.keys():
if avg[k] is not None:
if avg[k].is_floating_point():
avg[k] /= len(last)
else:
avg[k] //= len(last)
return avg
def ensemble(args):
last = [
os.path.join(args.exp_dir, args.experiment_name, f"epoch={n}.ckpt")
for n in range(args.epochs - 10, args.epochs)
]
model_path = os.path.join(args.exp_dir, args.experiment_name, "model_avg_10.pth")
torch.save({"state_dict": average_checkpoints(last)}, model_path)
|
from typing import Iterable, Type
from docarray.array.abstract_array import AbstractDocumentArray
from docarray.array.mixins import GetAttributeArrayMixin, ProtoArrayMixin
from docarray.document import AnyDocument, BaseDocument, BaseNode
from docarray.document.abstract_document import AbstractDocument
class DocumentArray(
list,
ProtoArrayMixin,
GetAttributeArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents of the same schema
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[AbstractDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
                f'DocumentArray[item] item should be a Document, not {item}'
)
        class _DocumentArrayTyped(DocumentArray):
            document_type = item
        # generate a property on the fly for every field of the item schema
        for field in _DocumentArrayTyped.document_type.__fields__.keys():
            def _property_generator(val: str):
                return property(lambda self: self._get_documents_attribute(val))
            setattr(_DocumentArrayTyped, field, _property_generator(field))
        _DocumentArrayTyped.__name__ = f'DocumentArray{item.__name__}'
        return _DocumentArrayTyped
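# Usage sketch (hypothetical schema, for illustration only):
# class MyDoc(BaseDocument):
#     text: str
# da = DocumentArray[MyDoc]([MyDoc(text='a'), MyDoc(text='b')])
# da.text  # property generated above; gathers the 'text' attribute of every doc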
|
from typing import Iterable, Type
from docarray.document import AnyDocument, BaseDocument, BaseNode
from docarray.document.abstract_document import AbstractDocument
from .abstract_array import AbstractDocumentArray
from .mixins import ProtoArrayMixin
class DocumentArray(
list,
ProtoArrayMixin,
AbstractDocumentArray,
BaseNode,
):
"""
    A DocumentArray is a list-like container of Documents of the same schema
:param docs: iterable of Document
"""
document_type: Type[BaseDocument] = AnyDocument
def __init__(self, docs: Iterable[AbstractDocument]):
super().__init__(doc_ for doc_ in docs)
def __class_getitem__(cls, item: Type[BaseDocument]):
if not issubclass(item, BaseDocument):
raise ValueError(
                f'DocumentArray[item] item should be a Document, not {item}'
)
        class _DocumentArrayTyped(DocumentArray):
            document_type = item
        _DocumentArrayTyped.__name__ = f'DocumentArray{item.__name__}'
        return _DocumentArrayTyped
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object.
A VerticesAndFaces Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the vertices information (`VerticesAndFaces.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the faces information (`VerticesAndFaces.faces`)
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
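# Usage sketch (hypothetical tensors; requires trimesh and IPython):
# import numpy as np
# tensors = VerticesAndFaces(vertices=np.random.rand(10, 3),
#                            faces=np.random.randint(0, 9, (20, 3)))
# tensors.display()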
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling the tensor data of a [`Mesh3D`][docarray.documents.mesh.Mesh3D] object.
A VerticesAndFaces Document can contain:
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the vertices information (`VerticesAndFaces.vertices`)
- an [`AnyTensor`](../../../../api_references/typing/tensor/tensor)
containing the faces information (`VerticesAndFaces.faces`)
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
"""
if TYPE_CHECKING:
import trimesh
else:
trimesh = import_library('trimesh', raise_error=True)
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
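# Usage sketch (hypothetical tensors; requires trimesh and IPython):
# import numpy as np
# tensors = VerticesAndFaces(vertices=np.random.rand(10, 3),
#                            faces=np.random.randint(0, 9, (20, 3)))
# tensors.display()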
|
"""
Sphinx Read the Docs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
import sphinx
__version__ = "0.5.0"
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
if sphinx.version_info >= (1, 6, 0):
# Register the theme that can be referenced without adding a theme path
app.add_html_theme("sphinx_rtd_theme", path.abspath(path.dirname(__file__)))
if sphinx.version_info >= (1, 8, 0):
# Add Sphinx message catalog for newer versions of Sphinx
# See http://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_message_catalog
rtd_locale_path = path.join(path.abspath(path.dirname(__file__)), "locale")
app.add_message_catalog("sphinx", rtd_locale_path)
return {"parallel_read_safe": True, "parallel_write_safe": True}
|
"""
Sphinx Read the Docs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
import sphinx
__version__ = '0.5.0'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
if sphinx.version_info >= (1, 6, 0):
# Register the theme that can be referenced without adding a theme path
app.add_html_theme('sphinx_rtd_theme', path.abspath(path.dirname(__file__)))
if sphinx.version_info >= (1, 8, 0):
# Add Sphinx message catalog for newer versions of Sphinx
# See http://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx.application.Sphinx.add_message_catalog
rtd_locale_path = path.join(path.abspath(path.dirname(__file__)), 'locale')
app.add_message_catalog('sphinx', rtd_locale_path)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_tensor_ops():
class A(BaseDoc):
tensor: NdArray[3, 224, 224]
class B(BaseDoc):
tensor: NdArray[3, 112, 224]
tensor = A(tensor=np.ones((3, 224, 224))).tensor
tensord = A(tensor=np.ones((3, 224, 224))).tensor
tensorn = np.zeros((3, 224, 224))
tensorhalf = B(tensor=np.ones((3, 112, 224))).tensor
tensorfull = np.concatenate([tensorhalf, tensorhalf], axis=1)
assert type(tensor) == NdArray
assert type(tensor + tensord) == NdArray
assert type(tensor + tensorn) == NdArray
assert type(tensor + tensorfull) == NdArray
|
import numpy as np
from docarray import BaseDoc
from docarray.typing import NdArray
def test_tensor_ops():
class A(BaseDoc):
tensor: NdArray[3, 224, 224]
class B(BaseDoc):
tensor: NdArray[3, 112, 224]
tensor = A(tensor=np.ones((3, 224, 224))).tensor
tensord = A(tensor=np.ones((3, 224, 224))).tensor
tensorn = np.zeros((3, 224, 224))
tensorhalf = B(tensor=np.ones((3, 112, 224))).tensor
tensorfull = np.concatenate([tensorhalf, tensorhalf], axis=1)
assert type(tensor) == NdArray
assert type(tensor + tensord) == NdArray
assert type(tensor + tensorn) == NdArray
assert type(tensor + tensorfull) == NdArray
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
) -> None:
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False) -> Tensor:
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
from .ContrastiveLoss import SiameseDistanceMetric
class OnlineContrastiveLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=SiameseDistanceMetric.COSINE_DISTANCE, margin: float = 0.5
):
"""
        This Online Contrastive loss is similar to :class:`ContrastiveLoss`, but it selects hard positive pairs (positives that
        are far apart) and hard negative pairs (negatives that are close) and computes the loss only for these pairs.
        This loss often yields better performance than ContrastiveLoss.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
References:
- `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
2. Data should include hard positives and hard negatives
Relations:
- :class:`ContrastiveLoss` is similar, but does not use hard positive and hard negative pairs.
:class:`OnlineContrastiveLoss` often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.OnlineContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(OnlineContrastiveLoss, self).__init__()
self.model = model
self.margin = margin
self.distance_metric = distance_metric
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor, size_average=False):
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
distance_matrix = self.distance_metric(embeddings[0], embeddings[1])
negs = distance_matrix[labels == 0]
poss = distance_matrix[labels == 1]
# select hard positive and hard negative pairs
negative_pairs = negs[negs < (poss.max() if len(poss) > 1 else negs.mean())]
positive_pairs = poss[poss > (negs.min() if len(negs) > 1 else poss.mean())]
positive_loss = positive_pairs.pow(2).sum()
negative_loss = F.relu(self.margin - negative_pairs).pow(2).sum()
loss = positive_loss + negative_loss
return loss
|
from __future__ import annotations
import re
from typing import Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexParser(BaseOutputParser[dict[str, str]]):
"""Parse the output of an LLM call using a regex."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
regex: str
"""The regex to use to parse the output."""
output_keys: list[str]
"""The keys to use for the output."""
default_output_key: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
def parse(self, text: str) -> dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
if self.default_output_key is None:
msg = f"Could not parse output: {text}"
raise ValueError(msg)
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
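# Usage sketch (hypothetical regex and keys, not part of the source):
# parser = RegexParser(regex=r"Answer: (.*), Score: (\d+)", output_keys=["answer", "score"])
# parser.parse("Answer: 42, Score: 9")  # -> {"answer": "42", "score": "9"}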
|
from __future__ import annotations
import re
from typing import Optional
from langchain_core.output_parsers import BaseOutputParser
class RegexParser(BaseOutputParser[dict[str, str]]):
"""Parse the output of an LLM call using a regex."""
@classmethod
def is_lc_serializable(cls) -> bool:
return True
regex: str
"""The regex to use to parse the output."""
output_keys: list[str]
"""The keys to use for the output."""
default_output_key: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
def parse(self, text: str) -> dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
else:
if self.default_output_key is None:
msg = f"Could not parse output: {text}"
raise ValueError(msg)
else:
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
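# Usage sketch (hypothetical regex and keys, not part of the source):
# parser = RegexParser(regex=r"Answer: (.*), Score: (\d+)", output_keys=["answer", "score"])
# parser.parse("Answer: 42, Score: 9")  # -> {"answer": "42", "score": "9"}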
|
"""Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentIndex
from langchain_core.indexing.in_memory import (
InMemoryDocumentIndex,
)
class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite):
@pytest.fixture
def index(self) -> Generator[DocumentIndex, None, None]:
yield InMemoryDocumentIndex() # noqa: PT022
class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture
async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore
yield InMemoryDocumentIndex() # noqa: PT022
def test_sync_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
index.upsert(documents)
assert index.invoke("hello") == [documents[0], documents[1]]
assert index.invoke("cat") == [documents[1], documents[0]]
async def test_async_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
await index.aupsert(documents)
assert (await index.ainvoke("hello")) == [documents[0], documents[1]]
assert (await index.ainvoke("cat")) == [documents[1], documents[0]]
|
"""Test in memory indexer."""
from collections.abc import AsyncGenerator, Generator
import pytest
from langchain_tests.integration_tests.indexer import (
AsyncDocumentIndexTestSuite,
DocumentIndexerTestSuite,
)
from langchain_core.documents import Document
from langchain_core.indexing.base import DocumentIndex
from langchain_core.indexing.in_memory import (
InMemoryDocumentIndex,
)
class TestDocumentIndexerTestSuite(DocumentIndexerTestSuite):
@pytest.fixture()
def index(self) -> Generator[DocumentIndex, None, None]:
yield InMemoryDocumentIndex()
class TestAsyncDocumentIndexerTestSuite(AsyncDocumentIndexTestSuite):
# Something funky is going on with mypy and async pytest fixture
@pytest.fixture()
async def index(self) -> AsyncGenerator[DocumentIndex, None]: # type: ignore
yield InMemoryDocumentIndex()
def test_sync_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
index.upsert(documents)
assert index.invoke("hello") == [documents[0], documents[1]]
assert index.invoke("cat") == [documents[1], documents[0]]
async def test_async_retriever() -> None:
index = InMemoryDocumentIndex()
documents = [
Document(id="1", page_content="hello world"),
Document(id="2", page_content="goodbye cat"),
]
await index.aupsert(documents)
assert (await index.ainvoke("hello")) == [documents[0], documents[1]]
assert (await index.ainvoke("cat")) == [documents[1], documents[0]]
|
import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.types.request.data import DataRequest
from jina.helper import random_port
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
@pytest.fixture()
def gateway_port():
port = random_port()
yield port
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text += f'{d.id} is fooed!'
def ws_flow(start_event, stop_event, gateway_port):
with Flow(protocol='websocket', port_expose=gateway_port).add(
uses=DummyExecutor
) as f:
start_event.set()
f.block(stop_event=stop_event)
def input_da_gen():
for i in range(5):
yield DocumentArray.empty(INPUT_DA_LEN)
time.sleep(1)
def json_requestify(da: DocumentArray, exec_endpoint='/foo'):
return {
'execEndpoint': exec_endpoint,
'data': {'docs': da.to_dict()},
}
def bytes_requestify(da: DocumentArray, exec_endpoint='/foo'):
r = DataRequest()
r._pb_body.header.exec_endpoint = exec_endpoint
r.data.docs_bytes = da.to_bytes()
return r.to_bytes()
@pytest.fixture
def flow_context(gateway_port):
start_event = Event()
stop_event = Event()
p = Process(
target=ws_flow,
args=(
start_event,
stop_event,
gateway_port
),
)
p.start()
start_event.wait()
yield
stop_event.set()
p.join()
async def json_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
) as ws:
for da in input_da_gen():
request = json_requestify(da)
await ws.send_json(request)
response = await ws.receive_json()
assert isinstance(response, dict)
assert response['header']['exec_endpoint'] == '/foo'
assert len(response['data']) == INPUT_DA_LEN
for doc in response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
async def bytes_sending_client(gateway_port):
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{gateway_port}/',
protocols=('bytes',),
) as ws:
for da in input_da_gen():
request = bytes_requestify(da)
await ws.send_bytes(request)
response = await ws.receive_bytes()
assert isinstance(response, bytes)
dict_response = DataRequest(response).to_dict()
assert dict_response['header']['exec_endpoint'] == '/foo'
assert len(dict_response['data']) == INPUT_DA_LEN
for doc in dict_response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
@pytest.mark.asyncio
async def test_json_single_client(flow_context, gateway_port):
await json_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_json_multiple_clients(flow_context, gateway_port):
await asyncio.wait([json_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
@pytest.mark.asyncio
async def test_bytes_single_client(flow_context, gateway_port):
await bytes_sending_client(gateway_port)
@pytest.mark.asyncio
async def test_bytes_multiple_clients(flow_context, gateway_port):
await asyncio.wait([bytes_sending_client(gateway_port) for _ in range(NUM_CLIENTS)])
|
import asyncio
import time
from multiprocessing import Event, Process
import aiohttp
import pytest
from jina import DocumentArray, Executor, Flow, requests
from jina.types.request.data import DataRequest
INPUT_DA_LEN = 2
NUM_CLIENTS = 3
GATEWAY_PORT = 12345
class DummyExecutor(Executor):
@requests(on='/foo')
def foo(self, docs: DocumentArray, **kwargs):
for d in docs:
d.text += f'{d.id} is fooed!'
def ws_flow(start_event, stop_event):
with Flow(protocol='websocket', port_expose=GATEWAY_PORT).add(
uses=DummyExecutor
) as f:
start_event.set()
f.block(stop_event=stop_event)
def input_da_gen():
for i in range(5):
yield DocumentArray.empty(INPUT_DA_LEN)
time.sleep(1)
def json_requestify(da: DocumentArray, exec_endpoint='/foo'):
return {
'execEndpoint': exec_endpoint,
'data': {'docs': da.to_dict()},
}
def bytes_requestify(da: DocumentArray, exec_endpoint='/foo'):
r = DataRequest()
r._pb_body.header.exec_endpoint = exec_endpoint
r.data.docs_bytes = da.to_bytes()
return r.to_bytes()
@pytest.fixture
def flow_context():
start_event = Event()
stop_event = Event()
p = Process(
target=ws_flow,
args=(
start_event,
stop_event,
),
)
p.start()
start_event.wait()
yield
stop_event.set()
p.join()
async def json_sending_client():
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{GATEWAY_PORT}/',
) as ws:
for da in input_da_gen():
request = json_requestify(da)
await ws.send_json(request)
response = await ws.receive_json()
assert isinstance(response, dict)
assert response['header']['exec_endpoint'] == '/foo'
assert len(response['data']) == INPUT_DA_LEN
for doc in response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
async def bytes_sending_client():
async with aiohttp.ClientSession() as session:
async with session.ws_connect(
f'ws://localhost:{GATEWAY_PORT}/',
protocols=('bytes',),
) as ws:
for da in input_da_gen():
request = bytes_requestify(da)
await ws.send_bytes(request)
response = await ws.receive_bytes()
assert isinstance(response, bytes)
dict_response = DataRequest(response).to_dict()
assert dict_response['header']['exec_endpoint'] == '/foo'
assert len(dict_response['data']) == INPUT_DA_LEN
for doc in dict_response['data']:
assert doc['text'] == f'{doc["id"]} is fooed!'
@pytest.mark.asyncio
async def test_json_single_client(flow_context):
await json_sending_client()
@pytest.mark.asyncio
async def test_json_multiple_clients(flow_context):
    await asyncio.wait([json_sending_client() for _ in range(NUM_CLIENTS)])
@pytest.mark.asyncio
async def test_bytes_single_client(flow_context):
await bytes_sending_client()
@pytest.mark.asyncio
async def test_bytes_multiple_clients(flow_context):
    await asyncio.wait([bytes_sending_client() for _ in range(NUM_CLIENTS)])
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_transformer import (
ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer)
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
DetrTransformerDecoderLayer,
DetrTransformerEncoder,
DetrTransformerEncoderLayer)
from .dino_transformer import CdnQueryGenerator, DinoTransformerDecoder
from .utils import (MLP, AdaptivePadding, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer', 'DinoTransformerDecoder',
'CdnQueryGenerator'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .conditional_detr_transformer import (
ConditionalDetrTransformerDecoder, ConditionalDetrTransformerDecoderLayer)
from .deformable_detr_transformer import (
DeformableDetrTransformerDecoder, DeformableDetrTransformerDecoderLayer,
DeformableDetrTransformerEncoder, DeformableDetrTransformerEncoderLayer)
from .detr_transformer import (DetrTransformerDecoder,
DetrTransformerDecoderLayer,
DetrTransformerEncoder,
DetrTransformerEncoderLayer)
from .utils import (MLP, AdaptivePadding, DynamicConv, PatchEmbed,
PatchMerging, inverse_sigmoid, nchw_to_nlc, nlc_to_nchw)
__all__ = [
'nlc_to_nchw', 'nchw_to_nlc', 'AdaptivePadding', 'PatchEmbed',
'PatchMerging', 'inverse_sigmoid', 'DynamicConv', 'MLP',
'DetrTransformerEncoder', 'DetrTransformerDecoder',
'DetrTransformerEncoderLayer', 'DetrTransformerDecoderLayer',
'DeformableDetrTransformerEncoder', 'DeformableDetrTransformerDecoder',
'DeformableDetrTransformerEncoderLayer',
'DeformableDetrTransformerDecoderLayer',
'ConditionalDetrTransformerDecoder',
'ConditionalDetrTransformerDecoderLayer'
]
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union
from torchvision import datapoints
from torchvision.datapoints._datapoint import _FillType, _FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[_FillType, Dict[Type, _FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
            # Recursively validate each value in the dict
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
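# Illustration (hedged, not part of the module): unlike defaultdict(lambda: 0),
# the construct above survives pickling, because functools.partial of a
# module-level function is picklable:
#   import pickle
#   d = _get_defaultdict(0)
#   d2 = pickle.loads(pickle.dumps(d))
#   d2["any key"]  # -> 0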
def _convert_fill_arg(fill: datapoints._FillType) -> datapoints._FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill # type: ignore[return-value]
def _setup_fill_arg(fill: Union[_FillType, Dict[Type, _FillType]]) -> Dict[Type, _FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import functools
import numbers
from collections import defaultdict
from typing import Any, Dict, Literal, Sequence, Type, TypeVar, Union
from torchvision import datapoints
from torchvision.datapoints._datapoint import FillType, FillTypeJIT
from torchvision.transforms.transforms import _check_sequence_input, _setup_angle, _setup_size # noqa: F401
def _setup_float_or_seq(arg: Union[float, Sequence[float]], name: str, req_size: int = 2) -> Sequence[float]:
if not isinstance(arg, (float, Sequence)):
raise TypeError(f"{name} should be float or a sequence of floats. Got {type(arg)}")
if isinstance(arg, Sequence) and len(arg) != req_size:
raise ValueError(f"If {name} is a sequence its length should be one of {req_size}. Got {len(arg)}")
if isinstance(arg, Sequence):
for element in arg:
if not isinstance(element, float):
raise ValueError(f"{name} should be a sequence of floats. Got {type(element)}")
if isinstance(arg, float):
arg = [float(arg), float(arg)]
if isinstance(arg, (list, tuple)) and len(arg) == 1:
arg = [arg[0], arg[0]]
return arg
def _check_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> None:
if isinstance(fill, dict):
for key, value in fill.items():
            # Recursively validate each value in the dict
_check_fill_arg(value)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
_check_fill_arg(default_value)
else:
if fill is not None and not isinstance(fill, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate fill arg, only Numbers, tuples, lists and dicts are allowed.")
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
def _convert_fill_arg(fill: datapoints.FillType) -> datapoints.FillTypeJIT:
# Fill = 0 is not equivalent to None, https://github.com/pytorch/vision/issues/6517
# So, we can't reassign fill to 0
# if fill is None:
# fill = 0
if fill is None:
return fill
if not isinstance(fill, (int, float)):
fill = [float(v) for v in list(fill)]
return fill # type: ignore[return-value]
def _setup_fill_arg(fill: Union[FillType, Dict[Type, FillType]]) -> Dict[Type, FillTypeJIT]:
_check_fill_arg(fill)
if isinstance(fill, dict):
for k, v in fill.items():
fill[k] = _convert_fill_arg(v)
if isinstance(fill, defaultdict) and callable(fill.default_factory):
default_value = fill.default_factory()
sanitized_default = _convert_fill_arg(default_value)
fill.default_factory = functools.partial(_default_arg, sanitized_default)
return fill # type: ignore[return-value]
return _get_defaultdict(_convert_fill_arg(fill))
def _check_padding_arg(padding: Union[int, Sequence[int]]) -> None:
if not isinstance(padding, (numbers.Number, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if isinstance(padding, (tuple, list)) and len(padding) not in [1, 2, 4]:
raise ValueError(f"Padding must be an int or a 1, 2, or 4 element tuple, not a {len(padding)} element tuple")
# TODO: let's use torchvision._utils.StrEnum to have the best of both worlds (strings and enums)
# https://github.com/pytorch/vision/issues/6250
def _check_padding_mode_arg(padding_mode: Literal["constant", "edge", "reflect", "symmetric"]) -> None:
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")
|
import os
from torchaudio.datasets import librilight_limited
from torchaudio_unittest.common_utils import get_whitenoise, save_wav, TempDirMixin, TorchaudioTestCase
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.flac"
path = os.path.join(file_path, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
save_wav(path, data, sample_rate)
sample = (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id)
return sample
def get_mock_dataset(dataset_dir: str):
"""Create mocked dataset for a sub directory.
Args:
dataset_dir (str): the path of the sub directory.
The structure is: audio_type/speaker_id/chapter_id/filename.flac
"""
mocked_data = []
sample_rate = 16000 # 16kHz
seed = 0
for audio_type in ["clean", "other"]:
for speaker_id in range(5):
for chapter_id in range(3):
file_path = os.path.join(dataset_dir, audio_type, str(speaker_id), str(chapter_id))
os.makedirs(file_path, exist_ok=True)
trans_content = []
for utterance_id in range(3):
sample = _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed)
trans_content.append(f"{sample[3]}-{sample[4]}-{sample[5]:04d} {sample[2]}")
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(file_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
def get_mock_datasets(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data_10min, mocked_data_1h, mocked_data_10h = [], [], []
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", "0")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10min = get_mock_dataset(dataset_dir)
mocked_data_1h += mocked_data_10min
for i in range(1, 6):
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", str(i))
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_1h += get_mock_dataset(dataset_dir)
mocked_data_10h += mocked_data_1h
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "9h")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10h += get_mock_dataset(dataset_dir)
return mocked_data_10min, mocked_data_1h, mocked_data_10h
class TestLibriLightLimited(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples_10min = []
samples_1h = []
samples_10h = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
(cls.samples_10min, cls.samples_1h, cls.samples_10h) = get_mock_datasets(cls.root_dir)
def _test_librilightlimited(self, dataset, samples):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == samples[i][1]
assert transcript == samples[i][2]
assert speaker_id == samples[i][3]
assert chapter_id == samples[i][4]
assert utterance_id == samples[i][5]
num_samples += 1
assert num_samples == len(samples)
def test_librilightlimited_10min(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10min")
self._test_librilightlimited(dataset, self.samples_10min)
def test_librilightlimited_1h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="1h")
self._test_librilightlimited(dataset, self.samples_1h)
def test_librilightlimited_10h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10h")
self._test_librilightlimited(dataset, self.samples_10h)
|
import os
from torchaudio.datasets import librilight_limited
from torchaudio_unittest.common_utils import (
get_whitenoise,
save_wav,
TempDirMixin,
TorchaudioTestCase,
)
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
def _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed):
filename = f"{speaker_id}-{chapter_id}-{utterance_id:04d}.flac"
path = os.path.join(file_path, filename)
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="float32", seed=seed)
transcript = " ".join([_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]])
save_wav(path, data, sample_rate)
sample = (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id)
return sample
def get_mock_dataset(dataset_dir: str):
"""Create mocked dataset for a sub directory.
Args:
dataset_dir (str): the path of the sub directory.
The structure is: audio_type/speaker_id/chapter_id/filename.flac
"""
mocked_data = []
sample_rate = 16000 # 16kHz
seed = 0
for audio_type in ["clean", "other"]:
for speaker_id in range(5):
for chapter_id in range(3):
file_path = os.path.join(dataset_dir, audio_type, str(speaker_id), str(chapter_id))
os.makedirs(file_path, exist_ok=True)
trans_content = []
for utterance_id in range(3):
sample = _save_sample(file_path, speaker_id, chapter_id, utterance_id, sample_rate, seed)
trans_content.append(f"{sample[3]}-{sample[4]}-{sample[5]:04d} {sample[2]}")
mocked_data.append(sample)
seed += 1
trans_filename = f"{speaker_id}-{chapter_id}.trans.txt"
trans_path = os.path.join(file_path, trans_filename)
with open(trans_path, "w") as f:
f.write("\n".join(trans_content))
return mocked_data
def get_mock_datasets(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data_10min, mocked_data_1h, mocked_data_10h = [], [], []
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", "0")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10min = get_mock_dataset(dataset_dir)
mocked_data_1h += mocked_data_10min
for i in range(1, 6):
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "1h", str(i))
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_1h += get_mock_dataset(dataset_dir)
mocked_data_10h += mocked_data_1h
dataset_dir = os.path.join(root_dir, "librispeech_finetuning", "9h")
os.makedirs(dataset_dir, exist_ok=True)
mocked_data_10h += get_mock_dataset(dataset_dir)
return mocked_data_10min, mocked_data_1h, mocked_data_10h
class TestLibriLightLimited(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples_10min = []
samples_1h = []
samples_10h = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
(cls.samples_10min, cls.samples_1h, cls.samples_10h) = get_mock_datasets(cls.root_dir)
def _test_librilightlimited(self, dataset, samples):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, chapter_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == samples[i][1]
assert transcript == samples[i][2]
assert speaker_id == samples[i][3]
assert chapter_id == samples[i][4]
assert utterance_id == samples[i][5]
num_samples += 1
assert num_samples == len(samples)
def test_librilightlimited_10min(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10min")
self._test_librilightlimited(dataset, self.samples_10min)
def test_librilightlimited_1h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="1h")
self._test_librilightlimited(dataset, self.samples_1h)
def test_librilightlimited_10h(self):
dataset = librilight_limited.LibriLightLimited(self.root_dir, subset="10h")
self._test_librilightlimited(dataset, self.samples_10h)
|
"""Simple Reader that reads abstract of primary citation for a given PDB id."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.pdb.utils import get_pdb_abstract
class PdbAbstractReader(BaseReader):
"""Protein Data Bank entries' primary citation abstract reader."""
def __init__(self) -> None:
super().__init__()
def load_data(self, pdb_ids: List[str]) -> List[Document]:
"""
Load data from RCSB or EBI REST API.
Args:
            pdb_ids (List[str]): List of PDB ids \
                for which primary citation abstracts are to be read.
"""
results = []
for pdb_id in pdb_ids:
title, abstracts = get_pdb_abstract(pdb_id)
primary_citation = abstracts[title]
abstract = primary_citation["abstract"]
abstract_text = "\n".join(
["\n".join([str(k), str(v)]) for k, v in abstract.items()]
)
results.append(
Document(
text=abstract_text,
extra_info={"pdb_id": pdb_id, "primary_citation": primary_citation},
)
)
return results
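# Usage sketch (hedged; requires network access to the RCSB/EBI REST API and a
# valid PDB id such as "1cbs"):
#   reader = PdbAbstractReader()
#   docs = reader.load_data(pdb_ids=["1cbs"])
#   print(docs[0].extra_info["pdb_id"], docs[0].text[:80])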
|
"""Simple Reader that reads abstract of primary citation for a given PDB id."""
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.pdb.utils import get_pdb_abstract
class PdbAbstractReader(BaseReader):
"""Protein Data Bank entries' primary citation abstract reader."""
def __init__(self) -> None:
super().__init__()
def load_data(self, pdb_ids: List[str]) -> List[Document]:
"""
Load data from RCSB or EBI REST API.
Args:
            pdb_ids (List[str]): List of PDB ids \
                for which primary citation abstracts are to be read.
"""
results = []
for pdb_id in pdb_ids:
title, abstracts = get_pdb_abstract(pdb_id)
primary_citation = abstracts[title]
abstract = primary_citation["abstract"]
abstract_text = "\n".join(
["\n".join([str(k), str(v)]) for k, v in abstract.items()]
)
results.append(
Document(
text=abstract_text,
extra_info={"pdb_id": pdb_id, "primary_citation": primary_citation},
)
)
return results
|
import enum
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype import features
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
# Class attribute defining transformed types. Other types are passed-through without any transformation
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (
features.is_simple_tensor,
features._Feature,
PIL.Image.Image,
)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, flat_inputs: List[Any]) -> None:
pass
def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
flat_inputs, spec = tree_flatten(inputs if len(inputs) > 1 else inputs[0])
self._check_inputs(flat_inputs)
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
inputs = inputs if len(inputs) > 1 else inputs[0]
flat_inputs, spec = tree_flatten(inputs)
self._check_inputs(flat_inputs)
if torch.rand(1) >= self.p:
return inputs
params = self._get_params(flat_inputs)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
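# Subclass sketch (hedged; `_Identity` is hypothetical) showing the contract:
# forward() flattens the inputs, routes only entries matching _transformed_types
# through _transform, and passes everything else through untouched.
#   class _Identity(Transform):
#       def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
#           return inpt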
|
import enum
from typing import Any, Callable, Dict, Tuple, Type, Union
import PIL.Image
import torch
from torch import nn
from torch.utils._pytree import tree_flatten, tree_unflatten
from torchvision.prototype import features
from torchvision.prototype.transforms._utils import _isinstance
from torchvision.utils import _log_api_usage_once
class Transform(nn.Module):
# Class attribute defining transformed types. Other types are passed-through without any transformation
_transformed_types: Tuple[Union[Type, Callable[[Any], bool]], ...] = (
features.is_simple_tensor,
features._Feature,
PIL.Image.Image,
)
def __init__(self) -> None:
super().__init__()
_log_api_usage_once(self)
def _check_inputs(self, sample: Any) -> None:
pass
def _get_params(self, sample: Any) -> Dict[str, Any]:
return dict()
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
raise NotImplementedError
def forward(self, *inputs: Any) -> Any:
sample = inputs if len(inputs) > 1 else inputs[0]
self._check_inputs(sample)
params = self._get_params(sample)
flat_inputs, spec = tree_flatten(sample)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
def extra_repr(self) -> str:
extra = []
for name, value in self.__dict__.items():
if name.startswith("_") or name == "training":
continue
if not isinstance(value, (bool, int, float, str, tuple, list, enum.Enum)):
continue
extra.append(f"{name}={value}")
return ", ".join(extra)
class _RandomApplyTransform(Transform):
def __init__(self, p: float = 0.5) -> None:
if not (0.0 <= p <= 1.0):
raise ValueError("`p` should be a floating point value in the interval [0.0, 1.0].")
super().__init__()
self.p = p
def forward(self, *inputs: Any) -> Any:
# We need to almost duplicate `Transform.forward()` here since we always want to check the inputs, but return
# early afterwards in case the random check triggers. The same result could be achieved by calling
# `super().forward()` after the random check, but that would call `self._check_inputs` twice.
sample = inputs if len(inputs) > 1 else inputs[0]
self._check_inputs(sample)
if torch.rand(1) >= self.p:
return sample
params = self._get_params(sample)
flat_inputs, spec = tree_flatten(sample)
flat_outputs = [
self._transform(inpt, params) if _isinstance(inpt, self._transformed_types) else inpt
for inpt in flat_inputs
]
return tree_unflatten(flat_outputs, spec)
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this step from the pipeline
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, delete this step from the pipeline
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/', seg='stuffthingmaps/train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
from docarray.documents.audio import AudioDoc
from docarray.documents.image import ImageDoc
from docarray.documents.mesh import Mesh3D, VerticesAndFaces
from docarray.documents.point_cloud import PointCloud3D, PointsAndColors
from docarray.documents.text import TextDoc
from docarray.documents.video import VideoDoc
__all__ = [
'TextDoc',
'ImageDoc',
'AudioDoc',
'Mesh3D',
'VerticesAndFaces',
'PointCloud3D',
'PointsAndColors',
'VideoDoc',
]
|
from docarray.documents.audio import AudioDoc
from docarray.documents.image import ImageDoc
from docarray.documents.mesh import Mesh3D
from docarray.documents.point_cloud import PointCloud3D
from docarray.documents.text import TextDoc
from docarray.documents.video import VideoDoc
__all__ = ['TextDoc', 'ImageDoc', 'AudioDoc', 'Mesh3D', 'PointCloud3D', 'VideoDoc']
|
import numpy as np
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
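    # (Context, hedged: SpladePooling aggregates the MLM logits into a single
    # vocabulary-sized vector, typically as a max over tokens of
    # log(1 + relu(logit)), which is what makes the resulting embedding sparse.)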
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
    # Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
# Save the model
model.push_to_hub(
"sparse-embedding/splade_example",
private=True,
)
# Load the model
loaded_model = SparseEncoder("sparse-embedding/splade_example")
print(f"Loaded model: {loaded_model}")
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = loaded_model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {loaded_model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = loaded_model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
        top_tokens = [loaded_model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
    # Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
# Save the model
model.push_to_hub(
"sparse-embedding/splade_example",
private=True,
)
# Load the model
loaded_model = SparseEncoder("sparse-embedding/splade_example")
print(f"Loaded model: {loaded_model}")
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = loaded_model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {loaded_model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = loaded_model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
        # Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
        top_tokens = [loaded_model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_big_accelerator,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors"
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
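        # These keys are loader/bookkeeping metadata rather than architecture
        # hyperparameters, so they are expected to differ between the two paths.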
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
@require_big_accelerator
@require_torch_accelerator
class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors"
repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
torch_dtype = torch.float8_e4m3fn
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype)
model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
WanTransformer3DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_big_gpu_with_torch_cuda,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors"
repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
@require_big_gpu_with_torch_cuda
@require_torch_accelerator
class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase):
model_class = WanTransformer3DModel
ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors"
repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers"
torch_dtype = torch.float8_e4m3fn
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype)
model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
|
from typing import Optional
from fastapi import FastAPI, Security
from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest
from fastapi.testclient import TestClient
app = FastAPI()
security = HTTPDigest(auto_error=False)
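# With auto_error=False the security dependency resolves to None instead of
# raising when credentials are missing or unusable, letting the endpoint below
# degrade gracefully (see the tests for the exact behavior).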
@app.get("/users/me")
def read_current_user(
credentials: Optional[HTTPAuthorizationCredentials] = Security(security),
):
if credentials is None:
return {"msg": "Create an account first"}
return {"scheme": credentials.scheme, "credentials": credentials.credentials}
client = TestClient(app)
def test_security_http_digest():
response = client.get("/users/me", headers={"Authorization": "Digest foobar"})
assert response.status_code == 200, response.text
assert response.json() == {"scheme": "Digest", "credentials": "foobar"}
def test_security_http_digest_no_credentials():
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_security_http_digest_incorrect_scheme_credentials():
response = client.get(
"/users/me", headers={"Authorization": "Other invalidauthorization"}
)
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"HTTPDigest": []}],
}
}
},
"components": {
"securitySchemes": {"HTTPDigest": {"type": "http", "scheme": "digest"}}
},
}
|
from typing import Optional
from fastapi import FastAPI, Security
from fastapi.security import HTTPAuthorizationCredentials, HTTPDigest
from fastapi.testclient import TestClient
app = FastAPI()
security = HTTPDigest(auto_error=False)
@app.get("/users/me")
def read_current_user(
credentials: Optional[HTTPAuthorizationCredentials] = Security(security),
):
if credentials is None:
return {"msg": "Create an account first"}
return {"scheme": credentials.scheme, "credentials": credentials.credentials}
client = TestClient(app)
def test_security_http_digest():
response = client.get("/users/me", headers={"Authorization": "Digest foobar"})
assert response.status_code == 200, response.text
assert response.json() == {"scheme": "Digest", "credentials": "foobar"}
def test_security_http_digest_no_credentials():
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_security_http_digest_incorrect_scheme_credentials():
response = client.get(
"/users/me", headers={"Authorization": "Other invalidauthorization"}
)
assert response.status_code == 403, response.text
assert response.json() == {"detail": "Invalid authentication credentials"}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"HTTPDigest": []}],
}
}
},
"components": {
"securitySchemes": {"HTTPDigest": {"type": "http", "scheme": "digest"}}
},
}
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
        - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all TVTensors to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
        - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all tv_tensors to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
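# Usage sketch (hedged; prototype API, subject to change): the dataset is an
# IterDataPipe yielding dicts with "label", "path" and "image" keys:
#   dataset = EuroSAT("~/data", skip_integrity_check=True)
#   sample = next(iter(dataset))
#   sample["label"], sample["image"]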
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.features import Label
from .._api import register_dataset, register_info
NAME = "eurosat"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(
categories=(
"AnnualCrop",
"Forest",
"HerbaceousVegetation",
"Highway",
"Industrial",
"Pasture",
"PermanentCrop",
"Residential",
"River",
"SeaLake",
)
)
@register_dataset(NAME)
class EuroSAT(Dataset):
"""EuroSAT Dataset.
homepage="https://github.com/phelber/eurosat",
"""
def __init__(self, root: Union[str, pathlib.Path], *, skip_integrity_check: bool = False) -> None:
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://madm.dfki.de/files/sentinel/EuroSAT.zip",
sha256="8ebea626349354c5328b142b96d0430e647051f26efc2dc974c843f25ecf70bd",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 27_000
|
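A hedged usage sketch for the EuroSAT dataset registered above; the prototype datasets API is unstable, so treat the exact entry points as assumptions:

from torchvision.prototype import datasets

dataset = datasets.load("eurosat")      # yields the IterDataPipe built by _datapipe
sample = next(iter(dataset))
print(sample["label"], sample["path"])  # dict keys come from _prepare_sample
|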
import pytest
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
@pytest.mark.parametrize(
'uses', ['jinaai+docker://jina-ai/DummyExecutor']
)
def test_container_pod(mocker, monkeypatch, uses):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', uses])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
from hubble.executor.hubio import HubIO
from jina.orchestrate.pods.factory import PodFactory
from jina.parsers import set_pod_parser
def test_container_pod(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_pull(self):
return 'docker://jinahub/dummy_executor'
monkeypatch.setattr(HubIO, 'pull', _mock_pull)
args = set_pod_parser().parse_args(['--uses', 'jinahub+docker://DummyExecutor'])
pod = PodFactory.build_pod(args)
assert pod.args.uses == 'docker://jinahub/dummy_executor'
assert pod.name == 'ContainerPod'
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDocument):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
da=da, func=cpu_intensive, backend='process', num_worker=num_workers
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocumentArray[MyMatrix]) -> DocumentArray[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batch_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batch(
da=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: ImageDoc) -> ImageDoc:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(da=da, func=io_intensive, backend='thread', num_worker=num_workers)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocumentArray[ImageDoc]) -> DocumentArray[ImageDoc]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batch_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batch(
da=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
import os
from time import time
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
pytestmark = [pytest.mark.benchmark, pytest.mark.slow]
class MyMatrix(BaseDocument):
matrix: NdArray
def cpu_intensive(doc: MyMatrix) -> MyMatrix:
# some cpu intensive function
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return doc
def test_map_docs_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 5
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs(
da=da, func=cpu_intensive, backend='process', num_worker=num_workers
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def cpu_intensive_batch(da: DocumentArray[MyMatrix]) -> DocumentArray[MyMatrix]:
# some cpu intensive function
for doc in da:
for i in range(3000):
sqrt_matrix = np.sqrt(doc.matrix)
doc.matrix = sqrt_matrix
return da
def test_map_docs_batch_multiprocessing():
if os.cpu_count() > 1:
def time_multiprocessing(num_workers: int) -> float:
n_docs = 16
rng = np.random.RandomState(0)
matrices = [rng.random(size=(1000, 1000)) for _ in range(n_docs)]
da = DocumentArray[MyMatrix]([MyMatrix(matrix=m) for m in matrices])
start_time = time()
list(
map_docs_batch(
da=da,
func=cpu_intensive_batch,
batch_size=8,
backend='process',
num_worker=num_workers,
)
)
return time() - start_time
time_1_cpu = time_multiprocessing(num_workers=1)
time_2_cpu = time_multiprocessing(num_workers=2)
assert time_2_cpu < time_1_cpu
def io_intensive(img: Image) -> Image:
# some io intensive function: load and set image url
img.tensor = img.url.load()
return img
def test_map_docs_multithreading():
def time_multithreading(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[Image](
[Image(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs(da=da, func=io_intensive, backend='thread', num_worker=num_workers)
)
return time() - start_time
time_1_thread = time_multithreading(num_workers=1)
time_2_thread = time_multithreading(num_workers=2)
assert time_2_thread < time_1_thread
def io_intensive_batch(da: DocumentArray[Image]) -> DocumentArray[Image]:
# some io intensive function: load and set image url
for doc in da:
doc.tensor = doc.url.load()
return da
def test_map_docs_batch_multithreading():
def time_multithreading_batch(num_workers: int) -> float:
n_docs = 100
da = DocumentArray[Image](
[Image(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
start_time = time()
list(
map_docs_batch(
da=da,
func=io_intensive_batch,
backend='thread',
num_worker=num_workers,
batch_size=10,
)
)
return time() - start_time
time_1_thread = time_multithreading_batch(num_workers=1)
time_2_thread = time_multithreading_batch(num_workers=2)
assert time_2_thread < time_1_thread
|
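A minimal sketch of the rule these benchmarks rely on: the 'process' backend suits CPU-bound functions and needs a picklable, module-level func, while 'thread' suits I/O-bound ones. The names below are illustrative:

import numpy as np
from docarray import BaseDocument, DocumentArray
from docarray.typing import NdArray
from docarray.utils.map import map_docs

class Vec(BaseDocument):
    data: NdArray

def square(doc: Vec) -> Vec:  # module-level, so the 'process' backend can pickle it
    doc.data = doc.data ** 2
    return doc

da = DocumentArray[Vec]([Vec(data=np.arange(3)) for _ in range(4)])
out = list(map_docs(da=da, func=square, backend='process', num_worker=2))
|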
from __future__ import annotations
import math
from pathlib import Path
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding_model: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding_model.tokenize(texts)
output = static_embedding_model(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding_model: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding_model.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding_model.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
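A hedged sketch of using StaticEmbedding as the sole module of a SentenceTransformer, mirroring the fixtures above (the tokenizer name and the 30522x768 weight shape are the tests' assumptions):

import numpy as np
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
static = StaticEmbedding(tokenizer, embedding_weights=np.random.rand(30522, 768))
model = SentenceTransformer(modules=[static])
embeddings = model.encode(["Hello world!", "How are you?"])
print(embeddings.shape)  # (2, 768)
|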
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
@pytest.mark.parametrize('use_stream', [False, True])
@pytest.mark.parametrize('reuse_session', [True, False])
def test_concurrent_clients(
concurrent, protocol, shards, polling, prefetch, reraise, use_stream, reuse_session
):
if not use_stream and protocol != 'grpc':
return
if reuse_session and protocol != 'http':
return
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port, reuse_session=reuse_session)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
return_responses=True,
stream=use_stream,
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
import multiprocessing
import random
import time
from functools import partial
import pytest
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.types.request.data import Response
NUM_REQUESTS = 5
class MyExecutor(Executor):
@requests(on='/ping')
def ping(self, **kwargs):
time.sleep(0.1 * random.random())
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('shards', [10])
@pytest.mark.parametrize('polling', ['ANY', 'ALL'])
@pytest.mark.parametrize('prefetch', [1, 10])
@pytest.mark.parametrize('concurrent', [15])
@pytest.mark.parametrize('use_stream', [False, True])
def test_concurrent_clients(
concurrent, protocol, shards, polling, prefetch, reraise, use_stream
):
if not use_stream and protocol != 'grpc':
return
def pong(peer_hash, queue, resp: Response):
for d in resp.docs:
queue.put((peer_hash, d.text))
def peer_client(port, protocol, peer_hash, queue):
c = Client(protocol=protocol, port=port)
for _ in range(NUM_REQUESTS):
c.post(
'/ping',
Document(text=peer_hash),
on_done=lambda r: pong(peer_hash, queue, r),
return_responses=True,
stream=use_stream,
)
f = Flow(protocol=protocol, prefetch=prefetch).add(
uses=MyExecutor, shards=shards, polling=polling
)
with f:
pqueue = multiprocessing.Queue()
port = f.port
process_pool = []
for peer_id in range(concurrent):
p = multiprocessing.Process(
target=partial(peer_client, port, protocol, str(peer_id), pqueue),
daemon=True,
)
p.start()
process_pool.append(p)
for p in process_pool:
p.join()
queue_len = 0
while not pqueue.empty():
peer_hash, text = pqueue.get()
assert peer_hash == text
queue_len += 1
assert queue_len == concurrent * NUM_REQUESTS
|
from typing import Generator, Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: ImageDoc) -> ImageDoc:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocumentArray[ImageDoc](
[ImageDoc(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)]
)
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocumentArray[ImageDoc]([ImageDoc(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocumentArray) -> DocumentArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDocument):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batch(n_docs, batch_size, backend):
da = DocumentArray[MyImage](
[MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
it = map_docs_batch(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocumentArray[MyImage])
|
from typing import Generator, Optional
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.typing import ImageUrl, NdArray
from docarray.utils.map import map_docs, map_docs_batch
from tests.units.typing.test_bytes import IMAGE_PATHS
N_DOCS = 2
def load_from_doc(d: Image) -> Image:
if d.url is not None:
d.tensor = d.url.load()
return d
@pytest.fixture()
def da():
da = DocumentArray[Image]([Image(url=IMAGE_PATHS['png']) for _ in range(N_DOCS)])
return da
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map(da, backend):
for tensor in da.tensor:
assert tensor is None
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for doc in docs:
assert doc.tensor is not None
def test_map_multiprocessing_lambda_func_raise_exception(da):
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=lambda x: x, backend='process'))
def test_map_multiprocessing_local_func_raise_exception(da):
def local_func(x):
return x
with pytest.raises(ValueError, match='Multiprocessing does not allow'):
list(map_docs(da=da, func=local_func, backend='process'))
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_check_order(backend):
da = DocumentArray[Image]([Image(id=i) for i in range(N_DOCS)])
docs = list(map_docs(da=da, func=load_from_doc, backend=backend))
assert len(docs) == N_DOCS
for i, doc in enumerate(docs):
assert doc.id == str(i)
def load_from_da(da: DocumentArray) -> DocumentArray:
for doc in da:
doc.tensor = doc.url.load()
return da
class MyImage(BaseDocument):
tensor: Optional[NdArray]
url: ImageUrl
@pytest.mark.slow
@pytest.mark.parametrize('n_docs,batch_size', [(10, 5), (10, 8)])
@pytest.mark.parametrize('backend', ['thread', 'process'])
def test_map_docs_batch(n_docs, batch_size, backend):
da = DocumentArray[MyImage](
[MyImage(url=IMAGE_PATHS['png']) for _ in range(n_docs)]
)
it = map_docs_batch(
da=da, func=load_from_da, batch_size=batch_size, backend=backend
)
assert isinstance(it, Generator)
for batch in it:
assert isinstance(batch, DocumentArray[MyImage])
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
import enum
import typing
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
class WsMessage(pydantic.BaseModel):
method: Methods
data: typing.Dict[str, typing.Any] | list[typing.Any] | None = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: typing.List[APIKeyPermission]
description: typing.Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: typing.List[APIKeyPermission]
|
import enum
import typing
import pydantic
import backend.data.graph
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
class WsMessage(pydantic.BaseModel):
method: Methods
data: typing.Dict[str, typing.Any] | list[typing.Any] | None = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
|
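A minimal sketch of how messages like the ones above round-trip through pydantic; it assumes the WsMessage/Methods definitions from this file and pydantic v2 (on v1, use .json()/.parse_raw() instead):

msg = WsMessage(method=Methods.SUBSCRIBE, data={"graph_id": "abc"}, success=True)
payload = msg.model_dump_json()
restored = WsMessage.model_validate_json(payload)
assert restored.method is Methods.SUBSCRIBE
|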
"""Run smoke tests"""
import os
import sys
from pathlib import Path
from sys import platform
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
from pathlib import Path
from sys import platform
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
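A minimal sketch of the four-step inference pattern used in smoke_test_torchvision_resnet50_classify above; the random uint8 image is illustrative, and the weights download on first use:

import torch
from torchvision.models import resnet50, ResNet50_Weights

weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).eval()
preprocess = weights.transforms()  # resizing/normalization bundled with the weights
img = torch.randint(0, 256, (3, 300, 300), dtype=torch.uint8)
batch = preprocess(img).unsqueeze(0)
probs = model(batch).squeeze(0).softmax(0)
print(weights.meta["categories"][probs.argmax().item()])
|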
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../common/lsj-100e_coco-instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'NdArrayEmbedding',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained on these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Training parameters
model_name = "distilroberta-base"
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_simcse{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info("Train sentences: {}".format(len(train_samples)))
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
SimCSE will be trained on these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_simcse_from_file.py path/to/sentences.txt
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, InputExample
import logging
from datetime import datetime
import gzip
import sys
import tqdm
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
# Training parameters
model_name = 'distilroberta-base'
train_batch_size = 128
max_seq_length = 32
num_epochs = 1
#Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ''
if len(sys.argv) >= 3:
output_name = "-"+sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = 'output/train_simcse{}-{}'.format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_samples = []
with gzip.open(filepath, 'rt', encoding='utf8') if filepath.endswith('.gz') else open(filepath, encoding='utf8') as fIn:
for line in tqdm.tqdm(fIn, desc='Read file'):
line = line.strip()
if len(line) >= 10:
train_samples.append(InputExample(texts=[line, line]))
logging.info("Train sentences: {}".format(len(train_samples)))
# We train our model using the MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={'lr': 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False # Set to True, if your GPU supports FP16 cores
)
|
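The core SimCSE trick in the scripts above is passing the same sentence twice: dropout inside the encoder makes the two embeddings differ, and MultipleNegativesRankingLoss pulls them together against in-batch negatives. A minimal sketch:

from sentence_transformers import InputExample

sentence = "A quick brown fox."
pair = InputExample(texts=[sentence, sentence])  # anchor and positive are identical on purpose
|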
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
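A hedged sketch of driving the LSTM module above; the feature-dict layout follows its forward signature, and the sizes are illustrative:

import torch

lstm = LSTM(word_embedding_dimension=8, hidden_dim=16)
features = {
    "token_embeddings": torch.rand(2, 5, 8),   # (batch, seq_len, emb_dim)
    "sentence_lengths": torch.tensor([5, 3]),  # real lengths before padding
}
out = lstm(features)
assert out["token_embeddings"].shape == (2, 5, 32)  # bidirectional doubles hidden_dim
|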
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDoc, DocArray
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDoc):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not fully compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
    of the data is similar.
.. code-block:: python
from docarray import DocArray
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocArray[Document]([Document() for _ in range(10)])
        doc.matches = DocArray[Document]([Document() for _ in range(10)])
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocArray[LegacyDocument]]
matches: Optional[DocArray[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
from __future__ import annotations
from typing import Any, Dict, Optional
from docarray import BaseDocument, DocumentArray
from docarray.typing import AnyEmbedding, AnyTensor
class LegacyDocument(BaseDocument):
"""
This Document is the LegacyDocument. It follows the same schema as in DocArray v1.
It can be useful to start migrating a codebase from v1 to v2.
    Nevertheless, the API is not fully compatible with DocArray v1 `Document`.
    Indeed, none of the methods associated with `Document` are present. Only the schema
    of the data is similar.
.. code-block:: python
from docarray import DocumentArray
from docarray.documents.legacy import LegacyDocument
import numpy as np
doc = LegacyDocument(text='hello')
doc.url = 'http://myimg.png'
doc.tensor = np.zeros((3, 224, 224))
doc.embedding = np.zeros((100, 1))
doc.tags['price'] = 10
doc.chunks = DocumentArray[Document]([Document() for _ in range(10)])
        doc.matches = DocumentArray[Document]([Document() for _ in range(10)])
"""
tensor: Optional[AnyTensor]
chunks: Optional[DocumentArray[LegacyDocument]]
matches: Optional[DocumentArray[LegacyDocument]]
blob: Optional[bytes]
text: Optional[str]
url: Optional[str]
embedding: Optional[AnyEmbedding]
tags: Dict[str, Any] = dict()
scores: Optional[Dict[str, Any]]
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
def test_atss_head_loss(self):
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1
}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = ATSSHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss_by_feat(cls_scores, bbox_preds,
centernesses, [gt_instances],
img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import ATSSHead
class TestATSSHead(TestCase):
def test_atss_head_loss(self):
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{'img_shape': (s, s, 3), 'scale_factor': 1}]
cfg = Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
atss_head = ATSSHead(
num_classes=4,
in_channels=1,
stacked_convs=1,
feat_channels=1,
norm_cfg=None,
train_cfg=cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds, centernesses = atss_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = atss_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
self.assertEqual(
empty_centerness_loss.item(), 0,
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = atss_head.loss(cls_scores, bbox_preds, centernesses,
[gt_instances], img_metas)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
self.assertGreater(onegt_centerness_loss.item(), 0,
'centerness loss should be non-zero')
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
import pytest
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
@pytest.mark.skip(reason="This test is rather slow, and the LabelAccuracyEvaluator is not commonly used.")
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
"""
Tests the correct computation of evaluation scores from BinaryClassificationEvaluator
"""
from __future__ import annotations
import csv
import gzip
import os
from pathlib import Path
from torch.utils.data import DataLoader
from sentence_transformers import (
InputExample,
SentenceTransformer,
evaluation,
losses,
util,
)
def test_LabelAccuracyEvaluator(paraphrase_distilroberta_base_v1_model: SentenceTransformer, tmp_path: Path) -> None:
"""Tests that the LabelAccuracyEvaluator can be loaded correctly"""
model = paraphrase_distilroberta_base_v1_model
nli_dataset_path = "datasets/AllNLI.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
dev_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
if len(dev_samples) >= 100:
break
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=len(label2int),
)
dev_dataloader = DataLoader(dev_samples, shuffle=False, batch_size=16)
evaluator = evaluation.LabelAccuracyEvaluator(dev_dataloader, softmax_model=train_loss)
metrics = evaluator(model, output_path=str(tmp_path))
assert "accuracy" in metrics
assert metrics["accuracy"] > 0.2
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
'TaskAlignedAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner'
]
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Type,
TypeVar,
Union,
)
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.doc_vec.doc_vec import DocVec
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.doc_vec.DocVec`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.doc_vec.DocVec`
:param docs_vec_columns: a Dict of List of :class:`~docarray.array.doc_vec.DocVec`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the doc_vec tensors
"""
def __init__(
self,
tensor_columns: Dict[str, AbstractTensor],
doc_columns: Dict[str, 'DocVec'],
docs_vec_columns: Dict[str, ListAdvancedIndexing['DocVec']],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.docs_vec_columns = docs_vec_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.docs_vec_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if the IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {key: col[item] for key, col in self.tensor_columns.items()}
doc_columns = {key: col[item] for key, col in self.doc_columns.items()}
docs_vec_columns = {
key: col[item] for key, col in self.docs_vec_columns.items()
}
any_columns = {key: col[item] for key, col in self.any_columns.items()}
return self.__class__(
tensor_columns,
doc_columns,
docs_vec_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor.get_comp_backend().n_dim(tensor) == 1:
            # to ensure consistency between numpy and pytorch
            # we wrap the scalar in a tensor of ndim = 1,
            # since numpy passes by value whereas torch passes by reference
return self.storage.tensor_columns[name][self.index : self.index + 1]
return self.storage.columns[name][self.index]
def __setitem__(self, name, value) -> None:
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
    def __iter__(self):
        return iter(self.storage.columns.keys())
def __len__(self):
return len(self.storage.columns)
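# --- Hedged usage sketch (not part of the original module): a view selects a
# single row lazily, while indexing the storage returns a new ColumnStorage.
# `storage` stands for an assumed, already-constructed ColumnStorage instance.
#
#   view = ColumnStorageView(index=1, storage=storage)
#   view['id']             # second value of the 'id' column
#   sub = storage[[0, 2]]  # new ColumnStorage holding rows 0 and 2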
|
from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
MutableMapping,
Type,
TypeVar,
Union,
)
from docarray.array.doc_vec.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.doc_vec.doc_vec import DocVec
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.doc_vec.DocVec`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.doc_vec.DocVec`
:param docs_vec_columns: a Dict of List of :class:`~docarray.array.doc_vec.DocVec`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the doc_vec tensors
"""
def __init__(
self,
tensor_columns: Dict[str, AbstractTensor],
doc_columns: Dict[str, 'DocVec'],
docs_vec_columns: Dict[str, ListAdvancedIndexing['DocVec']],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.docs_vec_columns = docs_vec_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.docs_vec_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if the IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {key: col[item] for key, col in self.tensor_columns.items()}
doc_columns = {key: col[item] for key, col in self.doc_columns.items()}
docs_vec_columns = {
key: col[item] for key, col in self.docs_vec_columns.items()
}
any_columns = {key: col[item] for key, col in self.any_columns.items()}
return self.__class__(
tensor_columns,
doc_columns,
docs_vec_columns,
any_columns,
self.tensor_type,
)
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor.get_comp_backend().n_dim(tensor) == 1:
            # to ensure consistency between numpy and pytorch
            # we wrap the scalar in a tensor of ndim = 1,
            # since numpy passes by value whereas torch passes by reference
return self.storage.tensor_columns[name][self.index : self.index + 1]
return self.storage.columns[name][self.index]
def __setitem__(self, name, value) -> None:
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
return self.storage.columns.keys()
def __len__(self):
return len(self.storage.columns)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
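# --- Illustrative examples (not part of the original file): plain release
# strings parse to all-int tuples, while release-candidate strings keep the
# rc suffix as a trailing string component.
#
#   parse_version_info('0.10.3')     # -> (0, 10, 3)
#   parse_version_info('0.10.0rc1')  # -> (0, 10, 0, 'rc1')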
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.2'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to auto-resume from the latest checkpoint in the '
        'work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# training speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            raise RuntimeError('Cannot find "auto_scale_lr", '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your '
                               'configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
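# --- Hedged CLI sketch (not part of the original script); the config path is
# a placeholder for any config shipped with the codebase.
#
#   python tools/train.py configs/atss/atss_r50_fpn_1x_coco.py --amp
#   python tools/train.py configs/atss/atss_r50_fpn_1x_coco.py \
#       --resume --cfg-options train_dataloader.batch_size=4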
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
        help='If a checkpoint path is specified, resume from it; otherwise, '
        'try to auto-resume from the latest checkpoint in the '
        'work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; key-value pairs '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            raise RuntimeError('Cannot find "auto_scale_lr", '
                               '"auto_scale_lr.enable" or '
                               '"auto_scale_lr.base_batch_size" in your '
                               'configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
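# --- Hedged illustration (not part of the original module): `validate` lets a
# bare URL string or a raw tensor coerce into a Video document, e.g.
#
#   Video.validate('https://example.com/clip.mp4')  # -> Video(url=...)
#   Video.validate(np.zeros((10, 64, 64, 3)))       # -> Video(tensor=...)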
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
import pytest
from importlib.util import find_spec
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
no_packages = (
    find_spec("psycopg2") is None
    or find_spec("sqlalchemy") is None
    or find_spec("asyncpg") is None
)
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
@pytest.mark.skipif(
    no_packages, reason="asyncpg, psycopg2-binary and sqlalchemy not installed"
)
def test_initialization():
errors = []
try:
pgstore1 = PostgresKVStore(table_name="mytable")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore2 = PostgresKVStore(table_name="mytable", connection_string="connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore3 = PostgresKVStore(table_name="mytable", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
try:
pgstore4 = PostgresKVStore(table_name="mytable", connection_string="connection_string", async_connection_string="async_connection_string")
errors.append(0)
except ValueError:
errors.append(1)
assert sum(errors) == 3
assert pgstore4._engine is None
assert pgstore4._async_engine is None
|
from llama_index.core.storage.kvstore.types import BaseKVStore
from llama_index.storage.kvstore.postgres import PostgresKVStore
def test_class():
names_of_base_classes = [b.__name__ for b in PostgresKVStore.__mro__]
assert BaseKVStore.__name__ in names_of_base_classes
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default_factory=dict,
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
from datetime import datetime, timedelta
from backend.blocks.hubspot._auth import (
HubSpotCredentials,
HubSpotCredentialsField,
HubSpotCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
class HubSpotEngagementBlock(Block):
class Input(BlockSchema):
credentials: HubSpotCredentialsInput = HubSpotCredentialsField()
operation: str = SchemaField(
description="Operation to perform (send_email, track_engagement)",
default="send_email",
)
email_data: dict = SchemaField(
description="Email data including recipient, subject, content",
default={},
)
contact_id: str = SchemaField(
description="Contact ID for engagement tracking", default=""
)
timeframe_days: int = SchemaField(
description="Number of days to look back for engagement",
default=30,
)
class Output(BlockSchema):
result: dict = SchemaField(description="Operation result")
status: str = SchemaField(description="Operation status")
def __init__(self):
super().__init__(
id="c6524385-7d87-49d6-a470-248bd29ca765",
description="Manages HubSpot engagements - sends emails and tracks engagement metrics",
categories={BlockCategory.CRM, BlockCategory.COMMUNICATION},
input_schema=HubSpotEngagementBlock.Input,
output_schema=HubSpotEngagementBlock.Output,
)
def run(
self, input_data: Input, *, credentials: HubSpotCredentials, **kwargs
) -> BlockOutput:
base_url = "https://api.hubapi.com"
headers = {
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
"Content-Type": "application/json",
}
if input_data.operation == "send_email":
# Using the email send API
email_url = f"{base_url}/crm/v3/objects/emails"
email_data = {
"properties": {
"hs_timestamp": datetime.now().isoformat(),
"hubspot_owner_id": "1", # This should be configurable
"hs_email_direction": "OUTBOUND",
"hs_email_status": "SEND",
"hs_email_subject": input_data.email_data.get("subject"),
"hs_email_text": input_data.email_data.get("content"),
"hs_email_to_email": input_data.email_data.get("recipient"),
}
}
response = requests.post(email_url, headers=headers, json=email_data)
result = response.json()
yield "result", result
yield "status", "email_sent"
elif input_data.operation == "track_engagement":
# Get engagement events for the contact
from_date = datetime.now() - timedelta(days=input_data.timeframe_days)
engagement_url = (
f"{base_url}/crm/v3/objects/contacts/{input_data.contact_id}/engagement"
)
params = {"limit": 100, "after": from_date.isoformat()}
response = requests.get(engagement_url, headers=headers, params=params)
engagements = response.json()
# Process engagement metrics
metrics = {
"email_opens": 0,
"email_clicks": 0,
"email_replies": 0,
"last_engagement": None,
"engagement_score": 0,
}
for engagement in engagements.get("results", []):
eng_type = engagement.get("properties", {}).get("hs_engagement_type")
if eng_type == "EMAIL":
metrics["email_opens"] += 1
elif eng_type == "EMAIL_CLICK":
metrics["email_clicks"] += 1
elif eng_type == "EMAIL_REPLY":
metrics["email_replies"] += 1
# Update last engagement time
eng_time = engagement.get("properties", {}).get("hs_timestamp")
if eng_time and (
not metrics["last_engagement"]
or eng_time > metrics["last_engagement"]
):
metrics["last_engagement"] = eng_time
# Calculate simple engagement score
metrics["engagement_score"] = (
metrics["email_opens"]
+ metrics["email_clicks"] * 2
+ metrics["email_replies"] * 3
)
yield "result", metrics
yield "status", "engagement_tracked"
|
from fastapi import FastAPI
from backend.server.middleware.security import SecurityHeadersMiddleware
from .routes.v1 import v1_router
external_app = FastAPI(
title="AutoGPT External API",
description="External API for AutoGPT integrations",
docs_url="/docs",
version="1.0",
)
external_app.add_middleware(SecurityHeadersMiddleware)
external_app.include_router(v1_router, prefix="/v1")
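# --- Hedged local-run sketch (not part of the original module); the module
# path below is an assumption about where this file lives in the package.
#
#   uvicorn backend.server.external.app:external_app --port 8006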
|
from fastapi import FastAPI
from .routes.v1 import v1_router
external_app = FastAPI(
title="AutoGPT External API",
description="External API for AutoGPT integrations",
docs_url="/docs",
version="1.0",
)
external_app.include_router(v1_router, prefix="/v1")
|
from typing import Any, Dict
import requests
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import Response
from llama_index.core.schema import QueryBundle
class CogniswitchQueryEngine(BaseQueryEngine):
def __init__(self, cs_token: str, OAI_token: str, apiKey: str) -> None:
"""
        Initialize the query engine with the required credentials.
Args:
cs_token (str): Cogniswitch token.
OAI_token (str): OpenAI token.
            apiKey (str): OAuth token.
"""
self.cs_token = cs_token
self.OAI_token = OAI_token
self.apiKey = apiKey
self.knowledge_request_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
)
self.headers = {
"apiKey": self.apiKey,
"platformToken": self.cs_token,
"openAIToken": self.OAI_token,
}
def query_knowledge(self, query: str) -> Response:
"""
Send a query to the Cogniswitch service and retrieve the response.
Args:
query (str): Query to be answered.
Returns:
            Response: The answer wrapped in a Response object, or the error message if the request fails.
"""
data = {"query": query}
response = requests.post(
self.knowledge_request_endpoint,
headers=self.headers,
data=data,
)
if response.status_code == 200:
resp = response.json()
answer = resp["data"]["answer"]
return Response(response=answer)
else:
error_message = response.json()["message"]
return Response(response=error_message)
def _query(self, query_bundle: QueryBundle) -> Response:
return self.query_knowledge(query_bundle.query_str)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
return self.query_knowledge(query_bundle.query_str)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
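# --- Hypothetical usage sketch (not part of the original module); the token
# values are placeholders, and a real request needs valid credentials.
#
#   engine = CogniswitchQueryEngine(
#       cs_token='<platform-token>',
#       OAI_token='<openai-token>',
#       apiKey='<oauth-token>',
#   )
#   engine.query_knowledge('What does Cogniswitch do?').response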
|
from typing import Any, Dict
import requests
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import Response
from llama_index.core.schema import QueryBundle
class CogniswitchQueryEngine(BaseQueryEngine):
def __init__(self, cs_token: str, OAI_token: str, apiKey: str) -> None:
"""The required fields.
Args:
cs_token (str): Cogniswitch token.
OAI_token (str): OpenAI token.
            apiKey (str): OAuth token.
"""
self.cs_token = cs_token
self.OAI_token = OAI_token
self.apiKey = apiKey
self.knowledge_request_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
)
self.headers = {
"apiKey": self.apiKey,
"platformToken": self.cs_token,
"openAIToken": self.OAI_token,
}
def query_knowledge(self, query: str) -> Response:
"""
Send a query to the Cogniswitch service and retrieve the response.
Args:
query (str): Query to be answered.
Returns:
            Response: The answer wrapped in a Response object, or the error message if the request fails.
"""
data = {"query": query}
response = requests.post(
self.knowledge_request_endpoint,
headers=self.headers,
data=data,
)
if response.status_code == 200:
resp = response.json()
answer = resp["data"]["answer"]
return Response(response=answer)
else:
error_message = response.json()["message"]
return Response(response=error_message)
def _query(self, query_bundle: QueryBundle) -> Response:
return self.query_knowledge(query_bundle.query_str)
async def _aquery(self, query_bundle: QueryBundle) -> Response:
return self.query_knowledge(query_bundle.query_str)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompts."""
return {}
|
from typing import Any, Dict, Union
import torch
from torchvision import transforms as _transforms
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
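# --- Hedged usage sketch (not part of the original module); the prototype
# datapoints API is unstable, so the exact BoundingBox constructor signature
# here is an assumption.
#
#   box = datapoints.BoundingBox(
#       [[10.0, 20.0, 50.0, 80.0]],
#       format=datapoints.BoundingBoxFormat.XYXY,
#       spatial_size=(100, 100),
#   )
#   ConvertBoundingBoxFormat('CXCYWH')(box)  # -> cx/cy/w/h boxes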
|
from typing import Any, Dict, Union
import torch
from torchvision import transforms as _transforms
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
        # We need to unwrap here to avoid unnecessary `__torch_function__` calls,
        # since `convert_format_bounding_box` does not have a dispatcher function that would do that for us
        output = F.convert_format_bounding_box(
            inpt.as_subclass(torch.Tensor), old_format=inpt.format, new_format=self.format
        )
        return datapoints.BoundingBox.wrap_like(inpt, output, format=self.format)
class ConvertDtype(Transform):
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(
self, inpt: Union[datapoints.TensorImageType, datapoints.TensorVideoType], params: Dict[str, Any]
) -> Union[datapoints.TensorImageType, datapoints.TensorVideoType]:
return F.convert_dtype(inpt, self.dtype)
# We changed the name to align it with the new naming scheme. Still, `ConvertImageDtype` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ConvertImageDtype = ConvertDtype
class ClampBoundingBoxes(Transform):
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
import os
from typing import List
import pytest
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import Document, TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from pymongo import MongoClient
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
import threading
lock = threading.Lock()
@pytest.fixture(scope="session")
def documents() -> List[Document]:
"""
    A list of documents representing the data to be embedded in the datastore.
    These meet the minimum requirements for Documents in the /upsert endpoint's UpsertRequest.
"""
text = Document.example().text
metadata = Document.example().metadata
texts = text.split("\n")
return [Document(text=text, metadata={"text": text}) for text in texts]
@pytest.fixture(scope="session")
def nodes(documents) -> List[TextNode]:
if OPENAI_API_KEY is None:
return None
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
return pipeline.run(documents=documents)
DB_NAME = os.environ.get("MONGODB_DATABASE", "llama_index_test_db")
collection_name = os.environ.get("MONGODB_COLLECTION", "llama_index_test_vectorstore")
vector_index_name = os.environ.get("MONGODB_INDEX", "vector_index")
MONGODB_URI = os.environ.get("MONGODB_URI")
@pytest.fixture(scope="session")
def atlas_client() -> MongoClient:
if MONGODB_URI is None:
return None
client = MongoClient(MONGODB_URI)
assert DB_NAME in client.list_database_names()
return client
@pytest.fixture()
def vector_store(atlas_client: MongoClient) -> MongoDBAtlasVectorSearch:
if MONGODB_URI is None:
return None
return MongoDBAtlasVectorSearch(
mongodb_client=atlas_client,
db_name=DB_NAME,
collection_name=collection_name,
vector_index_name=vector_index_name,
fulltext_index_name="fulltext_index",
)
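# --- Hedged usage sketch (not part of the original fixtures): a test would
# combine them roughly like this, gated on the same environment variables.
#
#   def test_add(vector_store, nodes):
#       ids = vector_store.add(nodes)
#       assert len(ids) == len(nodes)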
|
import os
from typing import List
import pytest
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.schema import Document, TextNode
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.mongodb import MongoDBAtlasVectorSearch
from pymongo import MongoClient
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
import threading
lock = threading.Lock()
@pytest.fixture(scope="session")
def documents() -> List[Document]:
"""List of documents represents data to be embedded in the datastore.
Minimum requirements for Documents in the /upsert endpoint's UpsertRequest.
"""
text = Document.example().text
metadata = Document.example().metadata
texts = text.split("\n")
return [Document(text=text, metadata={"text": text}) for text in texts]
@pytest.fixture(scope="session")
def nodes(documents) -> List[TextNode]:
if OPENAI_API_KEY is None:
return None
pipeline = IngestionPipeline(
transformations=[
SentenceSplitter(chunk_size=1024, chunk_overlap=200),
OpenAIEmbedding(),
],
)
return pipeline.run(documents=documents)
DB_NAME = os.environ.get("MONGODB_DATABASE", "llama_index_test_db")
collection_name = os.environ.get("MONGODB_COLLECTION", "llama_index_test_vectorstore")
vector_index_name = os.environ.get("MONGODB_INDEX", "vector_index")
MONGODB_URI = os.environ.get("MONGODB_URI")
@pytest.fixture(scope="session")
def atlas_client() -> MongoClient:
if MONGODB_URI is None:
return None
client = MongoClient(MONGODB_URI)
assert DB_NAME in client.list_database_names()
return client
@pytest.fixture()
def vector_store(atlas_client: MongoClient) -> MongoDBAtlasVectorSearch:
if MONGODB_URI is None:
return None
return MongoDBAtlasVectorSearch(
mongodb_client=atlas_client,
db_name=DB_NAME,
collection_name=collection_name,
vector_index_name=vector_index_name,
fulltext_index_name="fulltext_index",
)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`~sentence_transformers.evaluation.TranslationEvaluator` but is specifically designed for sparse encoder models.
    Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3, ...) and (fr_1, fr_2, fr_3, ...),
    where fr_i is assumed to be the translation of en_i, this evaluator checks whether vec(en_i) has the highest
    similarity to vec(fr_i) and computes the accuracy in both directions.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
        # Load a model; not multilingual yet, but hopefully multilingual models will appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends :class:`TranslationEvaluator` but is specifically designed for sparse encoder models.
    Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3, ...) and (fr_1, fr_2, fr_3, ...),
    where fr_i is assumed to be the translation of en_i, this evaluator checks whether vec(en_i) has the highest
    similarity to vec(fr_i) and computes the accuracy in both directions.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTranslationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
        # Load a model; not multilingual yet, but hopefully multilingual models will appear on the Hub soon
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_to_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields", default=None
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert model == DEF_EXPECTED_RESULT
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
"""Test yamlOutputParser"""
from enum import Enum
from typing import Optional
import pytest
from langchain_core.exceptions import OutputParserException
from pydantic import BaseModel, Field
from langchain.output_parsers.yaml import YamlOutputParser
class Actions(Enum):
SEARCH = "Search"
CREATE = "Create"
UPDATE = "Update"
DELETE = "Delete"
class TestModel(BaseModel):
action: Actions = Field(description="Action to be performed")
action_input: str = Field(description="Input to be used in the action")
additional_fields: Optional[str] = Field(
description="Additional fields", default=None
)
for_new_lines: str = Field(description="To be used to test newlines")
# Prevent pytest from trying to run tests on TestModel
TestModel.__test__ = False # type: ignore[attr-defined]
DEF_RESULT = """```yaml
---
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
```"""
DEF_RESULT_NO_BACKTICKS = """
action: Update
action_input: The yamlOutputParser class is powerful
additional_fields: null
for_new_lines: |
not_escape_newline:
escape_newline:
"""
# action 'update' with a lowercase 'u' to test schema validation failure.
DEF_RESULT_FAIL = """```yaml
action: update
action_input: The yamlOutputParser class is powerful
additional_fields: null
```"""
DEF_EXPECTED_RESULT = TestModel(
action=Actions.UPDATE,
action_input="The yamlOutputParser class is powerful",
additional_fields=None,
for_new_lines="not_escape_newline:\n escape_newline:\n",
)
@pytest.mark.parametrize("result", [DEF_RESULT, DEF_RESULT_NO_BACKTICKS])
def test_yaml_output_parser(result: str) -> None:
"""Test yamlOutputParser."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
model = yaml_parser.parse(result)
print("parse_result:", result) # noqa: T201
assert DEF_EXPECTED_RESULT == model
def test_yaml_output_parser_fail() -> None:
"""Test YamlOutputParser where completion result fails schema validation."""
yaml_parser: YamlOutputParser[TestModel] = YamlOutputParser(
pydantic_object=TestModel
)
try:
yaml_parser.parse(DEF_RESULT_FAIL)
except OutputParserException as e:
print("parse_result:", e) # noqa: T201
assert "Failed to parse TestModel from completion" in str(e)
else:
assert False, "Expected OutputParserException"
def test_yaml_output_parser_output_type() -> None:
"""Test YamlOutputParser OutputType."""
yaml_parser = YamlOutputParser(pydantic_object=TestModel)
assert yaml_parser.OutputType is TestModel
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmengine.logging import HistoryBuffer
array_method = [np.array, lambda x: x]
try:
import torch
except ImportError:
pass
else:
array_method.append(torch.tensor)
@HistoryBuffer.register_statistics
def custom_statistics(self):
return -1
class TestLoggerBuffer:
def test_init(self):
log_buffer = HistoryBuffer()
assert log_buffer.max_length == 1000000
log_history, counts = log_buffer.data
assert len(log_history) == 0
assert len(counts) == 0
        # test when the length of the array exceeds `max_length`
logs = np.random.randint(1, 10, log_buffer.max_length + 1)
counts = np.random.randint(1, 10, log_buffer.max_length + 1)
log_buffer = HistoryBuffer(logs, counts)
log_history, count_history = log_buffer.data
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
assert logs[1] == log_history[0]
assert counts[1] == count_history[0]
        # Different lengths of `log_history` and `count_history` will
        # raise an error
with pytest.raises(AssertionError):
HistoryBuffer([1, 2], [1])
@pytest.mark.parametrize('array_method', array_method)
def test_update(self, array_method):
# test `update` method
log_buffer = HistoryBuffer()
log_history = array_method([1, 2, 3, 4, 5])
count_history = array_method([5, 5, 5, 5, 5])
for i in range(len(log_history)):
log_buffer.update(float(log_history[i]), float(count_history[i]))
recorded_history, recorded_count = log_buffer.data
for a, b in zip(log_history, recorded_history):
assert float(a) == float(b)
for a, b in zip(count_history, recorded_count):
assert float(a) == float(b)
        # test that updating past `max_length` drops the oldest records
max_array = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
max_count = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
log_buffer = HistoryBuffer(max_array, max_count)
log_buffer.update(1)
log_history, count_history = log_buffer.data
assert log_history[0] == 1
assert count_history[0] == 1
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
        # Updating with an iterable raises a TypeError; `log_val` and
        # `count` must be single values
with pytest.raises(TypeError):
log_buffer.update(array_method([1, 2]))
@pytest.mark.parametrize('statistics_method, log_buffer_type',
[(np.min, 'min'), (np.max, 'max')])
def test_max_min(self, statistics_method, log_buffer_type):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert statistics_method(log_history[-10:]) == \
getattr(log_buffer, log_buffer_type)(10)
assert statistics_method(log_history) == \
getattr(log_buffer, log_buffer_type)()
def test_mean(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert np.sum(log_history[-10:]) / \
np.sum(count_history[-10:]) == \
log_buffer.mean(10)
assert np.sum(log_history) / \
np.sum(count_history) == \
log_buffer.mean()
def test_current(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert log_history[-1] == log_buffer.current()
        # an empty buffer raises ValueError when queried for the current value
log_buffer = HistoryBuffer()
with pytest.raises(ValueError):
log_buffer.current()
def test_statistics(self):
log_history = np.array([1, 2, 3, 4, 5])
count_history = np.array([1, 1, 1, 1, 1])
log_buffer = HistoryBuffer(log_history, count_history)
assert log_buffer.statistics('mean') == 3
assert log_buffer.statistics('min') == 1
assert log_buffer.statistics('max') == 5
assert log_buffer.statistics('current') == 5
        # Accessing an unknown statistics method raises a KeyError.
with pytest.raises(KeyError):
log_buffer.statistics('unknown')
def test_register_statistics(self):
log_buffer = HistoryBuffer()
assert log_buffer.statistics('custom_statistics') == -1
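# --- Hedged usage sketch (not part of the test suite) ---
# Driving HistoryBuffer as in a training loop, using only the API exercised
# above (`update`, `current`, `mean`, `data`, `max_length`).
def demo_history_buffer() -> None:
    buf = HistoryBuffer(max_length=4)  # keep only the 4 most recent records
    for loss in [0.9, 0.7, 0.5, 0.4, 0.3]:
        buf.update(loss, count=1)
    assert buf.current() == 0.3  # latest recorded value
    assert len(buf.data[0]) == 4  # the oldest record (0.9) was dropped
    print('mean of the last two losses:', buf.mean(2))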
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmengine.logging import HistoryBuffer
array_method = [np.array, lambda x: x]
try:
import torch
except ImportError:
pass
else:
array_method.append(torch.tensor)
class TestLoggerBuffer:
def test_init(self):
log_buffer = HistoryBuffer()
assert log_buffer.max_length == 1000000
log_history, counts = log_buffer.data
assert len(log_history) == 0
assert len(counts) == 0
        # test that an array longer than `max_length` is truncated
logs = np.random.randint(1, 10, log_buffer.max_length + 1)
counts = np.random.randint(1, 10, log_buffer.max_length + 1)
log_buffer = HistoryBuffer(logs, counts)
log_history, count_history = log_buffer.data
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
assert logs[1] == log_history[0]
assert counts[1] == count_history[0]
        # Mismatched lengths of `log_history` and `count_history` will
        # raise an error
with pytest.raises(AssertionError):
HistoryBuffer([1, 2], [1])
@pytest.mark.parametrize('array_method', array_method)
def test_update(self, array_method):
# test `update` method
log_buffer = HistoryBuffer()
log_history = array_method([1, 2, 3, 4, 5])
count_history = array_method([5, 5, 5, 5, 5])
for i in range(len(log_history)):
log_buffer.update(float(log_history[i]), float(count_history[i]))
recorded_history, recorded_count = log_buffer.data
for a, b in zip(log_history, recorded_history):
assert float(a) == float(b)
for a, b in zip(count_history, recorded_count):
assert float(a) == float(b)
        # test that updating past `max_length` drops the oldest records
max_array = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
max_count = array_method([[-1] + [1] * (log_buffer.max_length - 1)])
log_buffer = HistoryBuffer(max_array, max_count)
log_buffer.update(1)
log_history, count_history = log_buffer.data
assert log_history[0] == 1
assert count_history[0] == 1
assert len(log_history) == log_buffer.max_length
assert len(count_history) == log_buffer.max_length
        # Updating with an iterable raises a TypeError; `log_val` and
        # `count` must be single values
with pytest.raises(TypeError):
log_buffer.update(array_method([1, 2]))
@pytest.mark.parametrize('statistics_method, log_buffer_type',
[(np.min, 'min'), (np.max, 'max')])
def test_max_min(self, statistics_method, log_buffer_type):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert statistics_method(log_history[-10:]) == \
getattr(log_buffer, log_buffer_type)(10)
assert statistics_method(log_history) == \
getattr(log_buffer, log_buffer_type)()
def test_mean(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert np.sum(log_history[-10:]) / \
np.sum(count_history[-10:]) == \
log_buffer.mean(10)
assert np.sum(log_history) / \
np.sum(count_history) == \
log_buffer.mean()
def test_current(self):
log_history = np.random.randint(1, 5, 20)
count_history = np.ones(20)
log_buffer = HistoryBuffer(log_history, count_history)
assert log_history[-1] == log_buffer.current()
        # an empty buffer raises ValueError when queried for the current value
log_buffer = HistoryBuffer()
with pytest.raises(ValueError):
log_buffer.current()
def test_statistics(self):
log_history = np.array([1, 2, 3, 4, 5])
count_history = np.array([1, 1, 1, 1, 1])
log_buffer = HistoryBuffer(log_history, count_history)
assert log_buffer.statistics('mean') == 3
assert log_buffer.statistics('min') == 1
assert log_buffer.statistics('max') == 5
assert log_buffer.statistics('current') == 5
        # Accessing an unknown statistics method raises a KeyError.
with pytest.raises(KeyError):
log_buffer.statistics('unknown')
def test_register_statistics(self):
@HistoryBuffer.register_statistics
def custom_statistics(self):
return -1
log_buffer = HistoryBuffer()
assert log_buffer.statistics('custom_statistics') == -1
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
# use caffe img_norm
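    # Caffe-pretrained backbones expect BGR input (hence to_rgb=False) and
    # mean subtraction only: std stays at 1.0, unlike PyTorch-style weights
    # which also divide by the per-channel std.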
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore whitespace and newlines when
                comparing answer_prefix_tokens to the last tokens (to
                determine whether the answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be
                streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
# If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token)
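# --- Hedged usage sketch (not part of the handler) ---
# Consuming the handler: `aiter()` is inherited from
# AsyncIteratorCallbackHandler and yields only tokens emitted once the
# "Final Answer:" prefix has been detected. `agent` is a hypothetical object
# that accepts LangChain callbacks and exposes an async `arun`.
import asyncio
async def stream_final_answer(agent: Any, query: str) -> None:
    handler = AsyncFinalIteratorCallbackHandler()
    task = asyncio.create_task(agent.arun(query, callbacks=[handler]))
    async for token in handler.aiter():
        print(token, end="", flush=True)
    await task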
|
from __future__ import annotations
from typing import Any, Dict, List, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
else:
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[List[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
            strip_tokens: Whether to ignore whitespace and newlines when
                comparing answer_prefix_tokens to the last tokens (to
                determine whether the answer has been reached).
            stream_prefix: Whether the answer prefix itself should also be
                streamed.
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
# If yes, then put tokens from now on
if self.answer_reached:
self.queue.put_nowait(token)
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, ImageUrl
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an Embedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray import Image
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[Embedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, Image, Text
# compose it
class MultiModalDoc(Document):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import Embedding, ImageUrl, Tensor
class Image(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), a Tensor (`Image.tensor`),
and an Embedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import Image
# use it directly
image = Image(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray import Image
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyImage(Image):
second_embedding: Optional[Embedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import Document, Image, Text
# compose it
class MultiModalDoc(Document):
image: Image
text: Text
mmdoc = MultiModalDoc(
image=Image(url="http://www.jina.ai/image.jpg"),
text=Text(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
"""
url: Optional[ImageUrl]
tensor: Optional[Tensor]
embedding: Optional[Embedding]
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
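# Each entry is [speaker_id, chapter_id, <two segment indices>]; joined with
# '_' they form the LibriTTS file stem, e.g. '19_198_000000_000000'.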
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: path to the directory where the mocked dataset is created
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets.libritts import LIBRITTS
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
_UTTERANCE_IDS = [
[19, 198, "000000", "000000"],
[26, 495, "000004", "000000"],
]
_ORIGINAL_TEXT = "this is the original text."
_NORMALIZED_TEXT = "this is the normalized text."
def get_mock_dataset(root_dir):
"""
    root_dir: path to the directory where the mocked dataset is created
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LibriTTS", "train-clean-100")
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype="int16", seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, "w") as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, "w") as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (
waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id,
) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == f'{"_".join(str(u) for u in expected_ids[-4:])}'
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocList[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocList[MyDoc]()
da_generator = DocList[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocArray[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc]()
da_generator = DocArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str:
...
@abstractmethod
def deserialize(self, value: str) -> Any:
...
class JsonSerializer(BaseSerializer):
def _serialize_value(self, value: Any) -> Any:
"""Helper to serialize a single value."""
if isinstance(value, BaseComponent):
return {
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, BaseModel):
return {
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
elif isinstance(value, dict):
return {k: self._serialize_value(v) for k, v in value.items()}
elif isinstance(value, list):
return [self._serialize_value(item) for item in value]
return value
def serialize(self, value: Any) -> str:
try:
serialized_value = self._serialize_value(value)
return json.dumps(serialized_value)
        except Exception as e:
            raise ValueError(
                f"Failed to serialize value: {type(value)}: {value!s}"
            ) from e
def _deserialize_value(self, data: Any) -> Any:
"""Helper to deserialize a single value."""
if isinstance(data, dict):
if data.get("__is_pydantic") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif data.get("__is_component") and data.get("qualified_name"):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return {k: self._deserialize_value(v) for k, v in data.items()}
elif isinstance(data, list):
return [self._deserialize_value(item) for item in data]
return data
def deserialize(self, value: str) -> Any:
data = json.loads(value)
return self._deserialize_value(data)
class JsonPickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""Deserialize while prioritizing Pickle, falling back to JSON."""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
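# --- Hedged round-trip sketch (not part of the module) ---
# JSON-friendly payloads take the JSON path; objects that json.dumps rejects
# (the set below) fall through to JsonPickleSerializer's base64-pickle path.
if __name__ == "__main__":
    ser = JsonPickleSerializer()
    payload = {"answer": 42, "tags": ["a", "b"]}
    assert ser.deserialize(ser.serialize(payload)) == payload
    assert ser.deserialize(ser.serialize({1, 2, 3})) == {1, 2, 3}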
|
import base64
import json
import pickle
from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel
from llama_index.core.schema import BaseComponent
from .utils import import_module_from_qualified_name, get_qualified_name
class BaseSerializer(ABC):
@abstractmethod
def serialize(self, value: Any) -> str:
...
@abstractmethod
def deserialize(self, value: str) -> Any:
...
class JsonSerializer(BaseSerializer):
def serialize(self, value: Any) -> str:
if isinstance(value, BaseComponent):
return json.dumps(
{
"__is_component": True,
"value": value.to_dict(),
"qualified_name": get_qualified_name(value),
}
)
elif isinstance(value, BaseModel):
return json.dumps(
{
"__is_pydantic": True,
"value": value.model_dump(),
"qualified_name": get_qualified_name(value),
}
)
return json.dumps(value)
def deserialize(self, value: str) -> Any:
data = json.loads(value)
if (
isinstance(data, dict)
and data.get("__is_pydantic")
and data.get("qualified_name")
):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.model_validate(data["value"])
elif (
isinstance(data, dict)
and data.get("__is_component")
and data.get("qualified_name")
):
module_class = import_module_from_qualified_name(data["qualified_name"])
return module_class.from_dict(data["value"])
return data
class JsonPickleSerializer(JsonSerializer):
def serialize(self, value: Any) -> str:
"""Serialize while prioritizing JSON, falling back to Pickle."""
try:
return super().serialize(value)
except Exception:
return base64.b64encode(pickle.dumps(value)).decode("utf-8")
def deserialize(self, value: str) -> Any:
"""Deserialize while prioritizing Pickle, falling back to JSON."""
try:
return pickle.loads(base64.b64decode(value))
except Exception:
return super().deserialize(value)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Callable
import pytest
from jina import DocumentArray
from ...transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',
'bert-base-uncased',
'distilroberta-base',
'distilbert-base-cased-distilled-squad',
]
@pytest.mark.parametrize('model_name', MODELS_TO_TEST)
def test_load_torch_models(model_name: str, data_generator: Callable):
encoder = TransformerTorchEncoder(pretrained_model_name_or_path=model_name)
docs = DocumentArray([doc for doc in data_generator()])
encoder.encode(docs=docs, parameters={})
for doc in docs:
assert doc.embedding is not None
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
from typing import Dict, Callable
import pytest
from jina import DocumentArray
from ...transform_encoder import TransformerTorchEncoder
MODELS_TO_TEST = [
'sentence-transformers/distilbert-base-nli-stsb-mean-tokens',
'bert-base-uncased',
'distilroberta-base',
'distilbert-base-cased-distilled-squad',
]
@pytest.mark.parametrize(
'model_name', MODELS_TO_TEST
)
def test_load_torch_models(model_name: str, data_generator: Callable):
encoder = TransformerTorchEncoder(pretrained_model_name_or_path=model_name)
docs = DocumentArray([doc for doc in data_generator()])
encoder.encode(
docs=docs,
parameters={}
)
for doc in docs:
assert doc.embedding is not None
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 1.0, similarity_fct=util.dot_score) -> None:
"""
Given a list of (anchor, positive) pairs or (anchor, positive, negative) triplets, this loss optimizes the following:
1. Given an anchor (e.g. a question), assign the highest similarity to the corresponding positive (i.e. answer)
out of every single positive and negative (e.g. all answers) in the batch.
If you provide the optional negatives, they will all be used as extra options from which the model must pick the
correct positive. Within reason, the harder this "picking" is, the stronger the model will become. Because of
this, a higher batch size results in more in-batch negatives, which then increases performance (to a point).
This loss function works great to train embeddings for retrieval setups where you have positive pairs
(e.g. (query, answer)) as it will sample in each batch ``n-1`` negative docs randomly.
This loss is also known as InfoNCE loss, SimCSE loss, Cross-Entropy Loss with in-batch negatives, or simply
in-batch negatives loss.
Args:
model: SparseEncoder model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, dot product. Can also be set to cosine
similarity (and then set scale to 20)
Requirements:
1. Need to be used in SpladeLoss or CSRLoss as a loss function.
2. (anchor, positive) pairs or (anchor, positive, negative) triplets
Inputs:
+-------------------------------------------------+--------+
| Texts | Labels |
+=================================================+========+
| (anchor, positive) pairs | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative) triplets | none |
+-------------------------------------------------+--------+
| (anchor, positive, negative_1, ..., negative_n) | none |
+-------------------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseCachedMultipleNegativesRankingLoss` is equivalent to this loss, but it uses caching that allows for
much higher batch sizes (and thus better performance) without extra memory usage. However, it is slightly
slower.
- :class:`SparseGISTEmbedLoss` is equivalent to this loss, but uses a guide model to guide the in-batch negative
sample selection. `SparseGISTEmbedLoss` yields a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SpladeLoss(
model=model, loss=losses.SparseMultipleNegativesRankingLoss(model), lambda_corpus=3e-5, lambda_query=5e-5
)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError(
"SparseMultipleNegativesRankingLoss should not be used alone. Use it with SpladeLoss or CSRLoss."
)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseMultipleNegativesRankingLoss(MultipleNegativesRankingLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.dot_score) -> None:
        super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import ProtocolType
parser.add_argument(
'--protocol',
type=ProtocolType.from_string,
choices=list(ProtocolType),
default=ProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--log-config',
type=str,
default='default',
help='The config name or the absolute path to the YAML config file of the logger used in this object.',
)
parser.add_argument(
'--reuse-session',
action='store_true',
default=False,
        help='If set, the HTTPClient will reuse its ClientSession; the user is then responsible for closing it.',
)
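# --- Hedged usage sketch (not part of the module) ---
# Composing the mixins onto a standard ArgumentParser; '--protocol' values
# are parsed through ProtocolType.from_string.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Jina client arguments')
    mixin_client_protocol_parser(parser)
    mixin_client_features_parser(parser)
    args = parser.parse_args(['--protocol', 'http', '--asyncio'])
    print(args.protocol, args.asyncio)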
|
"""Module for argparse for Client"""
def mixin_client_protocol_parser(parser):
"""Add the arguments for the protocol to the client parser
    :param parser: the parser to configure
"""
from jina.enums import ProtocolType
parser.add_argument(
'--protocol',
type=ProtocolType.from_string,
choices=list(ProtocolType),
default=ProtocolType.GRPC,
help='Communication protocol between server and client.',
)
def mixin_client_features_parser(parser):
"""Add the arguments for the client to the parser
    :param parser: the parser to configure
"""
parser.add_argument(
'--asyncio',
action='store_true',
default=False,
help='If set, then the input and output of this Client work in an asynchronous manner. ',
)
parser.add_argument(
'--tracing',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry tracer will be available and will be enabled for automatic tracing of requests and customer span creation. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--traces-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--traces-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the trace exporter agent.',
)
parser.add_argument(
'--metrics',
action='store_true',
default=False,
help='If set, the sdk implementation of the OpenTelemetry metrics will be available for default monitoring and custom measurements. '
'Otherwise a no-op implementation will be provided.',
)
parser.add_argument(
'--metrics-exporter-host',
type=str,
default=None,
help='If tracing is enabled, this hostname will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--metrics-exporter-port',
type=int,
default=None,
help='If tracing is enabled, this port will be used to configure the metrics exporter agent.',
)
parser.add_argument(
'--log-config',
type=str,
default='default',
help='The config name or the absolute path to the YAML config file of the logger used in this object.',
)
|
from jina import DocumentArray, Executor, requests
class TestExecutor(Executor):
@requests
def process(self, docs: DocumentArray, **kwargs):
return docs
|
from jina import Executor, requests, DocumentArray
class TestExecutor(Executor):
@requests
def process(self, docs: DocumentArray, **kwargs):
return docs
|