input
stringlengths 33
5k
| output
stringlengths 32
5k
|
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import MODEL_WRAPPERS
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestMeanTeacherHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test1')
runner.train()
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
# checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(BaseModel):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(ToyModel2()),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test3')
runner.test()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from mmengine.model import BaseModel
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
from torch.utils.data import Dataset
from mmdet.utils import register_all_modules
register_all_modules()
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, batch_inputs, labels, mode='tensor'):
labels = torch.stack(labels)
outputs = self.linear(batch_inputs)
if mode == 'tensor':
return outputs
elif mode == 'loss':
loss = (labels - outputs).sum()
outputs = dict(loss=loss)
return outputs
else:
return outputs
class ToyModel1(BaseModel, ToyModel):
def __init__(self):
super().__init__()
def forward(self, *args, **kwargs):
return super(BaseModel, self).forward(*args, **kwargs)
class ToyModel2(BaseModel):
def __init__(self):
super().__init__()
self.teacher = ToyModel1()
self.student = ToyModel1()
def forward(self, *args, **kwargs):
return self.student(*args, **kwargs)
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
@property
def metainfo(self):
return self.METAINFO
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestMeanTeacherHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_mean_teacher_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel2().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
default_scope='mmdet',
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test1')
runner.train()
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
# checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(BaseModel):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(ToyModel2()),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
default_scope='mmdet',
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='MeanTeacherHook')],
experiment_name='test3')
runner.test()
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
POWERBI_PREFIX,
POWERBI_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"POWERBI_CHAT_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_CHAT_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"POWERBI_CHAT_PREFIX",
"POWERBI_CHAT_SUFFIX",
"POWERBI_PREFIX",
"POWERBI_SUFFIX",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.powerbi.prompt import (
POWERBI_CHAT_PREFIX,
POWERBI_CHAT_SUFFIX,
POWERBI_PREFIX,
POWERBI_SUFFIX,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"POWERBI_CHAT_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_CHAT_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_PREFIX": "langchain_community.agent_toolkits.powerbi.prompt",
"POWERBI_SUFFIX": "langchain_community.agent_toolkits.powerbi.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"POWERBI_PREFIX",
"POWERBI_SUFFIX",
"POWERBI_CHAT_PREFIX",
"POWERBI_CHAT_SUFFIX",
]
|
from __future__ import annotations
import collections
import json
import os
import string
from typing import Iterable
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
Simple and fast white-space tokenizer. Splits sentence based on white spaces.
Punctuation are stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> list[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
import collections
import json
import os
import string
from typing import Iterable, List
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
class WhitespaceTokenizer(WordTokenizer):
"""
Simple and fast white-space tokenizer. Splits sentence based on white spaces.
Punctuation are stripped from tokens.
"""
def __init__(
self, vocab: Iterable[str] = [], stop_words: Iterable[str] = ENGLISH_STOP_WORDS, do_lower_case: bool = False
):
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
def tokenize(self, text: str, **kwargs) -> List[int]:
if self.do_lower_case:
text = text.lower()
tokens = text.split()
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "whitespacetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "whitespacetokenizer_config.json"), "r") as fIn:
config = json.load(fIn)
return WhitespaceTokenizer(**config)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"OpenAICallbackHandler": "langchain_community.callbacks.openai_info",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OpenAICallbackHandler",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.openai_info import OpenAICallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"OpenAICallbackHandler": "langchain_community.callbacks.openai_info"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"OpenAICallbackHandler",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='TOOD',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='TOODHead',
num_classes=80,
in_channels=256,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
train_cfg=dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='TOOD',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='TOODHead',
num_classes=80,
in_channels=256,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
train_cfg=dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# custom hooks
custom_hooks = [dict(type='SetEpochInfoHook')]
|
"""
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
we can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding sim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.sparsity(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['active_dims']:.2f}")
print(f"Sparsity percentage: {stats['sparsity_ratio']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'active_dims': 56.66666793823242, 'sparsity_ratio': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.sparsity(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['active_dims']:.2f}")
print(f"Sparsity percentage: {stats_limited['sparsity_ratio']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'active_dims': 32.0, 'sparsity_ratio': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
"""
This is a simple application for sparse encoder: Computing embeddings.
we have multiple sentences and we want to compute their embeddings.
The embeddings are sparse, meaning that most of the values are zero.
The embeddings are stored in a sparse matrix format, which is more efficient for storage and computation.
we can also visualize the top tokens for each text."""
from sentence_transformers import SparseEncoder
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Embed a list of sentences
sentences = [
"This framework generates embeddings for each input sentence",
"Sentences are passed as a list of string.",
"The quick brown fox jumps over the lazy dog.",
]
# Generate embeddings
embeddings = model.encode(sentences)
# Print embedding sim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
stats = model.get_sparsity_stats(embeddings)
print(f"Embedding sparsity: {stats}")
print(f"Average non-zero dimensions: {stats['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats['row_sparsity_mean']:.2%}")
"""
Embedding dim: 30522
Embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 56.66666793823242, 'row_sparsity_mean': 0.9981433749198914}
Average non-zero dimensions: 56.67
Sparsity percentage: 99.81%
"""
# Visualize top tokens for each text
top_k = 10
token_weights = model.decode(embeddings, top_k=top_k)
print(f"\nTop tokens {top_k} for each text:")
# The result is a list of sentence embeddings as numpy arrays
for i, sentence in enumerate(sentences):
token_scores = ", ".join([f'("{token.strip()}", {value:.2f})' for token, value in token_weights[i]])
print(f"{i}: {sentence} -> Top tokens: {token_scores}")
"""
Top tokens 10 for each text:
0: This framework generates embeddings for each input sentence -> Top tokens: ("framework", 2.19), ("##bed", 2.12), ("input", 1.99), ("each", 1.60), ("em", 1.58), ("sentence", 1.49), ("generate", 1.42), ("##ding", 1.33), ("sentences", 1.10), ("create", 0.93)
1: Sentences are passed as a list of string. -> Top tokens: ("string", 2.72), ("pass", 2.24), ("sentences", 2.15), ("passed", 2.07), ("sentence", 1.90), ("strings", 1.86), ("list", 1.84), ("lists", 1.49), ("as", 1.18), ("passing", 0.73)
2: The quick brown fox jumps over the lazy dog. -> Top tokens: ("lazy", 2.18), ("fox", 1.67), ("brown", 1.56), ("over", 1.52), ("dog", 1.50), ("quick", 1.49), ("jump", 1.39), ("dogs", 1.25), ("foxes", 0.99), ("jumping", 0.84)
"""
# Example of using max_active_dims during encoding
print("\n--- Using max_active_dims during encoding ---")
# Generate embeddings with limited active dimensions
embeddings_limited = model.encode(sentences, max_active_dims=32)
stats_limited = model.get_sparsity_stats(embeddings_limited)
print(f"Limited embedding sparsity: {stats_limited}")
print(f"Average non-zero dimensions: {stats_limited['row_non_zero_mean']:.2f}")
print(f"Sparsity percentage: {stats_limited['row_sparsity_mean']:.2%}")
"""
--- Using max_active_dims during encoding ---
Limited embedding sparsity: {'num_rows': 3, 'num_cols': 30522, 'row_non_zero_mean': 32.0, 'row_sparsity_mean': 0.9989516139030457}
Average non-zero dimensions: 32.00
Sparsity percentage: 99.90%
"""
# Comparing memory usage
print("\n--- Comparing memory usage ---")
def get_memory_size(tensor):
if tensor.is_sparse:
# For sparse tensors, only count non-zero elements
return (
tensor._values().element_size() * tensor._values().nelement()
+ tensor._indices().element_size() * tensor._indices().nelement()
)
else:
return tensor.element_size() * tensor.nelement()
print(f"Original embeddings memory: {get_memory_size(embeddings) / 1024:.2f} KB")
print(f"Embeddings with max_active_dims=32 memory: {get_memory_size(embeddings_limited) / 1024:.2f} KB")
"""
--- Comparing memory usage ---
Original embeddings memory: 3.32 KB
Embeddings with max_active_dims=32 memory: 1.88 KB
"""
|
_base_ = 'faster-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 560, 1360],
out_channels=256,
num_outs=5))
|
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_4.0gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
neck=dict(
type='FPN',
in_channels=[80, 240, 560, 1360],
out_channels=256,
num_outs=5))
|
from typing import Any, Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.language_models import BaseLanguageModel
from pydantic import BaseModel, Field, model_validator
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.amadeus.base import AmadeusBaseTool
class ClosestAirportSchema(BaseModel):
"""Schema for the AmadeusClosestAirport tool."""
location: str = Field(
description=(
" The location for which you would like to find the nearest airport "
" along with optional details such as country, state, region, or "
" province, allowing for easy processing and identification of "
" the closest airport. Examples of the format are the following:\n"
" Cali, Colombia\n "
" Lincoln, Nebraska, United States\n"
" New York, United States\n"
" Sydney, New South Wales, Australia\n"
" Rome, Lazio, Italy\n"
" Toronto, Ontario, Canada\n"
)
)
class AmadeusClosestAirport(AmadeusBaseTool):
"""Tool for finding the closest airport to a particular location."""
name: str = "closest_airport"
description: str = (
"Use this tool to find the closest airport to a particular location."
)
args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema
llm: Optional[BaseLanguageModel] = Field(default=None)
"""Tool's llm used for calculating the closest airport. Defaults to `ChatOpenAI`."""
@model_validator(mode="before")
@classmethod
def set_llm(cls, values: Dict[str, Any]) -> Any:
if not values.get("llm"):
# For backward-compatibility
values["llm"] = ChatOpenAI(temperature=0)
return values
def _run(
self,
location: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
content = (
f" What is the nearest airport to {location}? Please respond with the "
" airport's International Air Transport Association (IATA) Location "
' Identifier in the following JSON format. JSON: "iataCode": "IATA '
' Location Identifier" '
)
return self.llm.invoke(content) # type: ignore[union-attr]
|
from typing import Any, Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.language_models import BaseLanguageModel
from pydantic import BaseModel, Field, model_validator
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.amadeus.base import AmadeusBaseTool
class ClosestAirportSchema(BaseModel):
"""Schema for the AmadeusClosestAirport tool."""
location: str = Field(
description=(
" The location for which you would like to find the nearest airport "
" along with optional details such as country, state, region, or "
" province, allowing for easy processing and identification of "
" the closest airport. Examples of the format are the following:\n"
" Cali, Colombia\n "
" Lincoln, Nebraska, United States\n"
" New York, United States\n"
" Sydney, New South Wales, Australia\n"
" Rome, Lazio, Italy\n"
" Toronto, Ontario, Canada\n"
)
)
class AmadeusClosestAirport(AmadeusBaseTool): # type: ignore[override, override, override]
"""Tool for finding the closest airport to a particular location."""
name: str = "closest_airport"
description: str = (
"Use this tool to find the closest airport to a particular location."
)
args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema
llm: Optional[BaseLanguageModel] = Field(default=None)
"""Tool's llm used for calculating the closest airport. Defaults to `ChatOpenAI`."""
@model_validator(mode="before")
@classmethod
def set_llm(cls, values: Dict[str, Any]) -> Any:
if not values.get("llm"):
# For backward-compatibility
values["llm"] = ChatOpenAI(temperature=0)
return values
def _run(
self,
location: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
content = (
f" What is the nearest airport to {location}? Please respond with the "
" airport's International Air Transport Association (IATA) Location "
' Identifier in the following JSON format. JSON: "iataCode": "IATA '
' Location Identifier" '
)
return self.llm.invoke(content) # type: ignore[union-attr]
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
"""Standard LangChain interface tests"""
from typing import Tuple, Type
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openai import ChatOpenAI
class TestOpenAIStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatOpenAI
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: dict) -> dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
msg = "Enum values must be strings"
raise ValueError(msg)
return values
@property
def _valid_values(self) -> list[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError:
msg = (
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
raise OutputParserException(msg)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> type[Enum]:
return self.enum
|
from enum import Enum
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import pre_init
class EnumOutputParser(BaseOutputParser[Enum]):
"""Parse an output that is one of a set of values."""
enum: type[Enum]
"""The enum to parse. Its values must be strings."""
@pre_init
def raise_deprecation(cls, values: dict) -> dict:
enum = values["enum"]
if not all(isinstance(e.value, str) for e in enum):
raise ValueError("Enum values must be strings")
return values
@property
def _valid_values(self) -> list[str]:
return [e.value for e in self.enum]
def parse(self, response: str) -> Enum:
try:
return self.enum(response.strip())
except ValueError:
raise OutputParserException(
f"Response '{response}' is not one of the "
f"expected values: {self._valid_values}"
)
def get_format_instructions(self) -> str:
return f"Select one of the following options: {', '.join(self._valid_values)}"
@property
def OutputType(self) -> type[Enum]:
return self.enum
|
"""Cohere Reranker Finetuning Engine."""
import importlib.util
import os
from typing import Optional
from llama_index.finetuning.types import BaseCohereRerankerFinetuningEngine
from llama_index.postprocessor.cohere_rerank import CohereRerank
class CohereRerankerFinetuneEngine(BaseCohereRerankerFinetuningEngine):
"""Cohere Reranker Finetune Engine."""
def __init__(
self,
train_file_name: str = "train.jsonl",
val_file_name: Optional[str] = None,
model_name: str = "exp_finetune",
model_type: str = "RERANK",
base_model: str = "english",
api_key: Optional[str] = None,
) -> None:
"""Init params."""
# This will be None if 'cohere' module is not available
cohere_spec = importlib.util.find_spec("cohere")
if cohere_spec is not None:
import cohere
else:
# Raise an ImportError if 'cohere' is not installed
raise ImportError(
"Cannot import cohere. Please install the package using `pip install cohere`."
)
try:
self.api_key = api_key or os.environ["COHERE_API_KEY"]
except IndexError:
raise ValueError(
"Must pass in cohere api key or "
"specify via COHERE_API_KEY environment variable "
)
self._model = cohere.Client(self.api_key, client_name="llama_index")
self._train_file_name = train_file_name
self._val_file_name = val_file_name
self._model_name = model_name
self._model_type = model_type
self._base_model = base_model
self._finetune_model = None
def finetune(self) -> None:
"""Finetune model."""
from cohere.custom_model_dataset import JsonlDataset
if self._val_file_name:
# Uploading both train file and eval file
dataset = JsonlDataset(
train_file=self._train_file_name, eval_file=self._val_file_name
)
else:
# Single Train File Upload:
dataset = JsonlDataset(train_file=self._train_file_name)
self._finetune_model = self._model.create_custom_model(
name=self._model_name,
dataset=dataset,
model_type=self._model_type,
base_model=self._base_model,
)
def get_finetuned_model(self, top_n: int = 5) -> CohereRerank:
"""Gets finetuned model id."""
if self._finetune_model is None:
raise RuntimeError(
"Finetuned model is not set yet. Please run the finetune method first."
)
return CohereRerank(
model=self._finetune_model.id, top_n=top_n, api_key=self.api_key
)
|
"""Cohere Reranker Finetuning Engine."""
import importlib.util
import os
from typing import Optional
from llama_index.finetuning.types import BaseCohereRerankerFinetuningEngine
from llama_index.postprocessor.cohere_rerank import CohereRerank
class CohereRerankerFinetuneEngine(BaseCohereRerankerFinetuningEngine):
"""Cohere Reranker Finetune Engine."""
def __init__(
self,
train_file_name: str = "train.jsonl",
val_file_name: Optional[str] = None,
model_name: str = "exp_finetune",
model_type: str = "RERANK",
base_model: str = "english",
api_key: Optional[str] = None,
) -> None:
"""Init params."""
# This will be None if 'cohere' module is not available
cohere_spec = importlib.util.find_spec("cohere")
if cohere_spec is not None:
import cohere
else:
# Raise an ImportError if 'cohere' is not installed
raise ImportError(
"Cannot import cohere. Please install the package using `pip install cohere`."
)
try:
self.api_key = api_key or os.environ["COHERE_API_KEY"]
except IndexError:
raise ValueError(
"Must pass in cohere api key or "
"specify via COHERE_API_KEY environment variable "
)
self._model = cohere.Client(self.api_key, client_name="llama_index")
self._train_file_name = train_file_name
self._val_file_name = val_file_name
self._model_name = model_name
self._model_type = model_type
self._base_model = base_model
self._finetune_model = None
def finetune(self) -> None:
"""Finetune model."""
from cohere.custom_model_dataset import JsonlDataset
if self._val_file_name:
# Uploading both train file and eval file
dataset = JsonlDataset(
train_file=self._train_file_name, eval_file=self._val_file_name
)
else:
# Single Train File Upload:
dataset = JsonlDataset(train_file=self._train_file_name)
self._finetune_model = self._model.create_custom_model(
name=self._model_name,
dataset=dataset,
model_type=self._model_type,
base_model=self._base_model,
)
def get_finetuned_model(self, top_n: int = 5) -> CohereRerank:
"""Gets finetuned model id."""
if self._finetune_model is None:
raise RuntimeError(
"Finetuned model is not set yet. Please run the finetune method first."
)
return CohereRerank(
model=self._finetune_model.id, top_n=top_n, api_key=self.api_key
)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.40%
Model Anchor Sparsity: Active Dimensions: 103.0, Sparsity Ratio: 0.9966
Model Positive Sparsity: Active Dimensions: 67.4, Sparsity Ratio: 0.9978
Model Negative Sparsity: Active Dimensions: 65.9, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8540
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
Model Anchor Sparsity: Active Dimensions: 105.5, Sparsity Ratio: 0.9965
Model Positive Sparsity: Active Dimensions: 69.8, Sparsity Ratio: 0.9977
Model Negative Sparsity: Active Dimensions: 68.6, Sparsity Ratio: 0.9978
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
import json
import os
from typing import List
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
stride_sizes: List[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
model = CNN(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
import json
import os
from typing import List
import torch
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
stride_sizes: List[int] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
model = CNN(**config)
model.load_state_dict(weights)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.utils.util_random import ensure_rng
def random_boxes(num=1, scale=1, rng=None):
"""Simple version of ``kwimage.Boxes.random``
Returns:
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
References:
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
Example:
>>> num = 3
>>> scale = 512
>>> rng = 0
>>> boxes = random_boxes(num, scale, rng)
>>> print(boxes)
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
[216.9113, 330.6978, 224.0446, 456.5878],
[405.3632, 196.3221, 493.3953, 270.7942]])
"""
rng = ensure_rng(rng)
tlbr = rng.rand(num, 4).astype(np.float32)
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
tlbr[:, 0] = tl_x * scale
tlbr[:, 1] = tl_y * scale
tlbr[:, 2] = br_x * scale
tlbr[:, 3] = br_y * scale
boxes = torch.from_numpy(tlbr)
return boxes
|
import numpy as np
import torch
from mmdet.utils.util_random import ensure_rng
def random_boxes(num=1, scale=1, rng=None):
"""Simple version of ``kwimage.Boxes.random``
Returns:
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
References:
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
Example:
>>> num = 3
>>> scale = 512
>>> rng = 0
>>> boxes = random_boxes(num, scale, rng)
>>> print(boxes)
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
[216.9113, 330.6978, 224.0446, 456.5878],
[405.3632, 196.3221, 493.3953, 270.7942]])
"""
rng = ensure_rng(rng)
tlbr = rng.rand(num, 4).astype(np.float32)
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
tlbr[:, 0] = tl_x * scale
tlbr[:, 1] = tl_y * scale
tlbr[:, 2] = br_x * scale
tlbr[:, 3] = br_y * scale
boxes = torch.from_numpy(tlbr)
return boxes
|
"""
This script contains an example how to perform re-ranking with a Cross-Encoder for semantic search.
First, we use an efficient Bi-Encoder to retrieve similar questions from the Quora Duplicate Questions dataset:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Then, we re-rank the hits from the Bi-Encoder using a Cross-Encoder.
"""
import csv
import os
import pickle
import time
from sentence_transformers import CrossEncoder, SentenceTransformer, util
# We use a BiEncoder (SentenceTransformer) that produces embeddings for questions.
# We then search for similar questions using cosine similarity and identify the top 100 most similar questions
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
num_candidates = 500
# To refine the results, we use a CrossEncoder. A CrossEncoder gets both inputs (input_question, retrieved_question)
# and outputs a score 0...1 indicating the similarity.
cross_encoder_model = CrossEncoder("cross-encoder/stsb-roberta-base")
# Dataset we want to use
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 20000
# Some local file to cache computed embeddings
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
print("Input question:", inp_question)
# First, retrieve candidates using cosine similarity search
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=num_candidates)
hits = hits[0] # Get the hits for the first query
print("Cosine-Similarity search took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with cosine-similarity:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
# Now, do the re-ranking with the cross-encoder
start_time = time.time()
sentence_pairs = [[inp_question, corpus_sentences[hit["corpus_id"]]] for hit in hits]
ce_scores = cross_encoder_model.predict(sentence_pairs)
for idx in range(len(hits)):
hits[idx]["cross-encoder_score"] = ce_scores[idx]
# Sort list by CrossEncoder scores
hits = sorted(hits, key=lambda x: x["cross-encoder_score"], reverse=True)
print("\nRe-ranking with CrossEncoder took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with CrossEncoder:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["cross-encoder_score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This script contains an example how to perform re-ranking with a Cross-Encoder for semantic search.
First, we use an efficient Bi-Encoder to retrieve similar questions from the Quora Duplicate Questions dataset:
https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs
Then, we re-rank the hits from the Bi-Encoder using a Cross-Encoder.
"""
from sentence_transformers import SentenceTransformer, util
from sentence_transformers import CrossEncoder
import os
import csv
import pickle
import time
# We use a BiEncoder (SentenceTransformer) that produces embeddings for questions.
# We then search for similar questions using cosine similarity and identify the top 100 most similar questions
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
num_candidates = 500
# To refine the results, we use a CrossEncoder. A CrossEncoder gets both inputs (input_question, retrieved_question)
# and outputs a score 0...1 indicating the similarity.
cross_encoder_model = CrossEncoder("cross-encoder/stsb-roberta-base")
# Dataset we want to use
url = "http://qim.fs.quoracdn.net/quora_duplicate_questions.tsv"
dataset_path = "quora_duplicate_questions.tsv"
max_corpus_size = 20000
# Some local file to cache computed embeddings
embedding_cache_path = "quora-embeddings-{}-size-{}.pkl".format(model_name.replace("/", "_"), max_corpus_size)
# Check if embedding cache path exists
if not os.path.exists(embedding_cache_path):
# Check if the dataset exists. If not, download and extract
# Download dataset if needed
if not os.path.exists(dataset_path):
print("Download dataset")
util.http_get(url, dataset_path)
# Get all unique sentences from the file
corpus_sentences = set()
with open(dataset_path, encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_MINIMAL)
for row in reader:
corpus_sentences.add(row["question1"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences.add(row["question2"])
if len(corpus_sentences) >= max_corpus_size:
break
corpus_sentences = list(corpus_sentences)
print("Encode the corpus. This might take a while")
corpus_embeddings = model.encode(corpus_sentences, show_progress_bar=True, convert_to_tensor=True)
print("Store file on disc")
with open(embedding_cache_path, "wb") as fOut:
pickle.dump({"sentences": corpus_sentences, "embeddings": corpus_embeddings}, fOut)
else:
print("Load pre-computed embeddings from disc")
with open(embedding_cache_path, "rb") as fIn:
cache_data = pickle.load(fIn)
corpus_sentences = cache_data["sentences"][0:max_corpus_size]
corpus_embeddings = cache_data["embeddings"][0:max_corpus_size]
###############################
print("Corpus loaded with {} sentences / embeddings".format(len(corpus_sentences)))
while True:
inp_question = input("Please enter a question: ")
print("Input question:", inp_question)
# First, retrieve candidates using cosine similarity search
start_time = time.time()
question_embedding = model.encode(inp_question, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=num_candidates)
hits = hits[0] # Get the hits for the first query
print("Cosine-Similarity search took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with cosine-similarity:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["score"], corpus_sentences[hit["corpus_id"]]))
# Now, do the re-ranking with the cross-encoder
start_time = time.time()
sentence_pairs = [[inp_question, corpus_sentences[hit["corpus_id"]]] for hit in hits]
ce_scores = cross_encoder_model.predict(sentence_pairs)
for idx in range(len(hits)):
hits[idx]["cross-encoder_score"] = ce_scores[idx]
# Sort list by CrossEncoder scores
hits = sorted(hits, key=lambda x: x["cross-encoder_score"], reverse=True)
print("\nRe-ranking with CrossEncoder took {:.3f} seconds".format(time.time() - start_time))
print("Top 5 hits with CrossEncoder:")
for hit in hits[0:5]:
print("\t{:.3f}\t{}".format(hit["cross-encoder_score"], corpus_sentences[hit["corpus_id"]]))
print("\n\n========\n")
|
from typing import Dict, List, Optional, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
np.uint,
np.uint8,
np.uint64,
int,
np.int8,
np.int64,
float,
np.float16,
np.float128,
np.double,
],
)
def test_ndarray_dtype(dtype):
class MyDoc(BaseDocument):
tensor: NdArray
doc = MyDoc(tensor=np.ndarray([1, 2, 3], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
torch.uint8,
torch.int,
torch.int8,
torch.int64,
torch.float,
torch.float64,
torch.double,
],
)
def test_torch_dtype(dtype):
class MyDoc(BaseDocument):
tensor: TorchTensor
doc = MyDoc(tensor=torch.zeros([5, 5], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
|
from typing import Optional, Dict, List, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
np.uint,
np.uint8,
np.uint64,
np.int,
np.int8,
np.int64,
np.float,
np.float16,
np.float128,
np.double,
],
)
def test_ndarray_dtype(dtype):
class MyDoc(BaseDocument):
tensor: NdArray
doc = MyDoc(tensor=np.ndarray([1, 2, 3], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
@pytest.mark.proto
@pytest.mark.parametrize(
'dtype',
[
torch.uint8,
torch.int,
torch.int8,
torch.int64,
torch.float,
torch.float64,
torch.double,
],
)
def test_torch_dtype(dtype):
class MyDoc(BaseDocument):
tensor: TorchTensor
doc = MyDoc(tensor=torch.zeros([5, 5], dtype=dtype))
assert doc.tensor.dtype == dtype
assert MyDoc.from_protobuf(doc.to_protobuf()).tensor.dtype == dtype
assert MyDoc.parse_obj(doc.dict()).tensor.dtype == dtype
|
# Owner(s): ["module: dynamo"]
import unittest
import torch
import torch._dynamo.test_case
from torch._dynamo.testing import CompileCounter, EagerAndRecordGraphs, normalize_gm
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import TEST_XPU
device_type = (
acc.type if (acc := torch.accelerator.current_accelerator(True)) else "cpu"
)
class PythonDispatcherTests(torch._dynamo.test_case.TestCase):
def test_dispatch_key1(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x + 1
return torch._C._dispatch_keys(x)
x = torch.randn(2, 3)
self.assertTrue(fn(x).raw_repr() == torch._C._dispatch_keys(x + 1).raw_repr())
def test_dispatch_key2(self):
from torch.testing._internal.two_tensor import TwoTensor
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x.sin()
return torch._C._dispatch_keys(x)
x = torch.randn(3)
y = torch.randn(3)
z = TwoTensor(x, y)
self.assertTrue(fn(z).raw_repr() == torch._C._dispatch_keys(z.sin()).raw_repr())
def test_dispatch_key3(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
return torch.sin(x + 1), key_set
x = torch.randn(2, 3)
self.assertEqual(fn(x)[0], torch.sin(x + 1))
self.assertTrue(
fn(x)[1].raw_repr() == torch._C._dispatch_tls_local_include_set().raw_repr()
)
def test_dispatch_key4(self):
eager = EagerAndRecordGraphs()
@torch.compile(backend=eager, fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
key_set = key_set | torch._C._dispatch_keys(x)
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
if key_set.highestPriorityTypeId() == torch.DispatchKey.PythonDispatcher:
return torch.sin(x + 1)
else:
return torch.sin(x - 1)
x = torch.randn(2, 3)
self.assertEqual(fn(x), torch.sin(x - 1))
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
class GraphModule(torch.nn.Module):
def forward(self, L_x_: "f32[2, 3]"):
l_x_ = L_x_
sub: "f32[2, 3]" = l_x_ - 1; l_x_ = None
sin: "f32[2, 3]" = torch.sin(sub); sub = None
return (sin,)
""", # NOQA: B950
)
@unittest.skipIf(not TEST_CUDA and not TEST_XPU, "requires cuda or xpu")
def test_dispatch_key_set_guard(self):
counter = CompileCounter()
@torch.compile(backend=counter, fullgraph=True)
def fn(x, dks):
if dks.has("CPU"):
return torch.sin(x + 1)
else:
return torch.sin(x - 1)
x1 = torch.randn(2, 3)
dks1 = torch._C._dispatch_keys(x1)
self.assertEqual(fn(x1, dks1), torch.sin(x1 + 1))
self.assertEqual(counter.frame_count, 1)
x2 = torch.randn(2, 3)
dks2 = torch._C._dispatch_keys(x2)
self.assertEqual(fn(x2, dks2), torch.sin(x2 + 1))
# No recompile since the dispatch key set is the same though the tensor is different.
self.assertEqual(counter.frame_count, 1)
x3 = torch.randn(2, 3, device=device_type)
dks3 = torch._C._dispatch_keys(x3)
self.assertEqual(fn(x3, dks3), torch.sin(x3 - 1))
# Re-compile since the dispatch key set is different.
self.assertEqual(counter.frame_count, 2)
def test_functorch_interpreter(self):
counter = CompileCounter()
def square_and_add(x, y):
interpreter = (
torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter()
)
level = interpreter.level()
if interpreter.key() == torch._C._functorch.TransformType.Vmap:
return (x**2 + y) * level
else:
return x**2 * level
@torch.compile(backend=counter, fullgraph=True)
def fn(x, y):
return torch.vmap(square_and_add)(x, y)
x = torch.tensor([1, 2, 3, 4])
y = torch.tensor([10, 20, 30, 40])
self.assertEqual(fn(x, y), torch.tensor([11, 24, 39, 56]))
self.assertEqual(counter.frame_count, 1)
x = torch.tensor([1, 2, 3, 1])
y = torch.tensor([10, 20, 30, 10])
self.assertEqual(fn(x, y), torch.tensor([11, 24, 39, 11]))
# No recompile
self.assertEqual(counter.frame_count, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Owner(s): ["module: dynamo"]
import unittest
import torch
import torch._dynamo.test_case
from torch._dynamo.testing import CompileCounter, EagerAndRecordGraphs, normalize_gm
from torch.testing._internal.common_cuda import TEST_CUDA
class PythonDispatcherTests(torch._dynamo.test_case.TestCase):
def test_dispatch_key1(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x + 1
return torch._C._dispatch_keys(x)
x = torch.randn(2, 3)
self.assertTrue(fn(x).raw_repr() == torch._C._dispatch_keys(x + 1).raw_repr())
def test_dispatch_key2(self):
from torch.testing._internal.two_tensor import TwoTensor
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x.sin()
return torch._C._dispatch_keys(x)
x = torch.randn(3)
y = torch.randn(3)
z = TwoTensor(x, y)
self.assertTrue(fn(z).raw_repr() == torch._C._dispatch_keys(z.sin()).raw_repr())
def test_dispatch_key3(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
return torch.sin(x + 1), key_set
x = torch.randn(2, 3)
self.assertEqual(fn(x)[0], torch.sin(x + 1))
self.assertTrue(
fn(x)[1].raw_repr() == torch._C._dispatch_tls_local_include_set().raw_repr()
)
def test_dispatch_key4(self):
eager = EagerAndRecordGraphs()
@torch.compile(backend=eager, fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
key_set = key_set | torch._C._dispatch_keys(x)
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
if key_set.highestPriorityTypeId() == torch.DispatchKey.PythonDispatcher:
return torch.sin(x + 1)
else:
return torch.sin(x - 1)
x = torch.randn(2, 3)
self.assertEqual(fn(x), torch.sin(x - 1))
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
class GraphModule(torch.nn.Module):
def forward(self, L_x_: "f32[2, 3]"):
l_x_ = L_x_
sub: "f32[2, 3]" = l_x_ - 1; l_x_ = None
sin: "f32[2, 3]" = torch.sin(sub); sub = None
return (sin,)
""", # NOQA: B950
)
@unittest.skipIf(not TEST_CUDA, "requires cuda")
def test_dispatch_key_set_guard(self):
counter = CompileCounter()
@torch.compile(backend=counter, fullgraph=True)
def fn(x, dks):
if dks.has("CPU"):
return torch.sin(x + 1)
else:
return torch.sin(x - 1)
x1 = torch.randn(2, 3)
dks1 = torch._C._dispatch_keys(x1)
self.assertEqual(fn(x1, dks1), torch.sin(x1 + 1))
self.assertEqual(counter.frame_count, 1)
x2 = torch.randn(2, 3)
dks2 = torch._C._dispatch_keys(x2)
self.assertEqual(fn(x2, dks2), torch.sin(x2 + 1))
# No recompile since the dispatch key set is the same though the tensor is different.
self.assertEqual(counter.frame_count, 1)
x3 = torch.randn(2, 3, device="cuda")
dks3 = torch._C._dispatch_keys(x3)
self.assertEqual(fn(x3, dks3), torch.sin(x3 - 1))
# Re-compile since the dispatch key set is different.
self.assertEqual(counter.frame_count, 2)
def test_functorch_interpreter(self):
counter = CompileCounter()
def square_and_add(x, y):
interpreter = (
torch._functorch.pyfunctorch.retrieve_current_functorch_interpreter()
)
level = interpreter.level()
if interpreter.key() == torch._C._functorch.TransformType.Vmap:
return (x**2 + y) * level
else:
return x**2 * level
@torch.compile(backend=counter, fullgraph=True)
def fn(x, y):
return torch.vmap(square_and_add)(x, y)
x = torch.tensor([1, 2, 3, 4])
y = torch.tensor([10, 20, 30, 40])
self.assertEqual(fn(x, y), torch.tensor([11, 24, 39, 56]))
self.assertEqual(counter.frame_count, 1)
x = torch.tensor([1, 2, 3, 1])
y = torch.tensor([10, 20, 30, 10])
self.assertEqual(fn(x, y), torch.tensor([11, 24, 39, 11]))
# No recompile
self.assertEqual(counter.frame_count, 1)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ruff: noqa
from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from .tqdm import (
disable_progress_bars,
enable_progress_bars,
are_progress_bars_disabled,
tqdm,
)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# Lint as: python3
from . import tqdm as _tqdm # _tqdm is the module
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from .tqdm import (
disable_progress_bars,
enable_progress_bars,
are_progress_bars_disabled,
tqdm,
)
|
from typing import TYPE_CHECKING
from docarray.array.storage.qdrant.backend import BackendMixin, QdrantConfig
from docarray.array.storage.qdrant.find import FindMixin
from docarray.array.storage.qdrant.getsetdel import GetSetDelMixin
from docarray.array.storage.qdrant.helper import DISTANCES
from docarray.array.storage.qdrant.seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'QdrantConfig']
if TYPE_CHECKING: # pragma: no cover
from qdrant_client import QdrantClient
from qdrant_client.http.models.models import Distance
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin):
@property
def serialize_config(self) -> dict:
return self._config.serialize_config
@property
def distance(self) -> 'Distance':
return DISTANCES[self._config.distance]
@property
def serialization_config(self) -> dict:
return self._serialize_config
@property
def n_dim(self) -> int:
return self._n_dim
@property
def collection_name(self) -> str:
return self._config.collection_name
@property
def collection_name_meta(self) -> str:
return f'{self.collection_name}_meta'
@property
def config(self):
return self._config
@property
def client(self) -> 'QdrantClient':
return self._client
@property
def scroll_batch_size(self) -> int:
return self._config.scroll_batch_size
|
from typing import TYPE_CHECKING
from docarray.array.storage.qdrant.backend import BackendMixin, QdrantConfig
from docarray.array.storage.qdrant.find import FindMixin
from docarray.array.storage.qdrant.getsetdel import GetSetDelMixin
from docarray.array.storage.qdrant.helper import DISTANCES
from docarray.array.storage.qdrant.seqlike import SequenceLikeMixin
__all__ = ['StorageMixins', 'QdrantConfig']
if TYPE_CHECKING:
from qdrant_client import QdrantClient
from qdrant_client.http.models.models import Distance
class StorageMixins(FindMixin, BackendMixin, GetSetDelMixin, SequenceLikeMixin):
@property
def serialize_config(self) -> dict:
return self._config.serialize_config
@property
def distance(self) -> 'Distance':
return DISTANCES[self._config.distance]
@property
def serialization_config(self) -> dict:
return self._serialize_config
@property
def n_dim(self) -> int:
return self._n_dim
@property
def collection_name(self) -> str:
return self._config.collection_name
@property
def collection_name_meta(self) -> str:
return f'{self.collection_name}_meta'
@property
def config(self):
return self._config
@property
def client(self) -> 'QdrantClient':
return self._client
@property
def scroll_batch_size(self) -> int:
return self._config.scroll_batch_size
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.ReciprocalRankFusionEvaluator import (
ReciprocalRankFusionEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"ReciprocalRankFusionEvaluator",
]
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.evaluation.SparseBinaryClassificationEvaluator import (
SparseBinaryClassificationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseEmbeddingSimilarityEvaluator import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseInformationRetrievalEvaluator import (
SparseInformationRetrievalEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseMSEEvaluator import (
SparseMSEEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import (
SparseNanoBEIREvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseRerankingEvaluator import (
SparseRerankingEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTranslationEvaluator import (
SparseTranslationEvaluator,
)
from sentence_transformers.sparse_encoder.evaluation.SparseTripletEvaluator import (
SparseTripletEvaluator,
)
__all__ = [
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseBinaryClassificationEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTripletEvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
]
|
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=list,
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=dict,
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
async def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
# If items is a dictionary, iterate over its values
for item in items.values():
yield "item", item
yield "key", item
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=list,
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=dict,
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
# If items is a dictionary, iterate over its values
for item in items.values():
yield "item", item
yield "key", item
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .visualization_hook import DetVisualizationHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'DetVisualizationHook',
'NumClassCheckHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkloss_hook import CheckInvalidLossHook
from .memory_profiler_hook import MemoryProfilerHook
from .num_class_check_hook import NumClassCheckHook
from .set_epoch_info_hook import SetEpochInfoHook
from .sync_norm_hook import SyncNormHook
from .yolox_mode_switch_hook import YOLOXModeSwitchHook
__all__ = [
'YOLOXModeSwitchHook', 'SyncNormHook', 'CheckInvalidLossHook',
'SetEpochInfoHook', 'MemoryProfilerHook', 'NumClassCheckHook'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
"""Document compressor."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from pydantic import BaseModel
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base class for document compressors.
This abstraction is primarily used for
post-processing of retrieved documents.
Documents matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents
using an LLM.
**Note** users should favor using a RunnableLambda
instead of sub-classing from this interface.
"""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
return await run_in_executor(
None, self.compress_documents, documents, query, callbacks
)
|
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from pydantic import BaseModel
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import Sequence
from langchain_core.callbacks import Callbacks
from langchain_core.documents import Document
class BaseDocumentCompressor(BaseModel, ABC):
"""Base class for document compressors.
This abstraction is primarily used for
post-processing of retrieved documents.
Documents matching a given query are first retrieved.
Then the list of documents can be further processed.
For example, one could re-rank the retrieved documents
using an LLM.
**Note** users should favor using a RunnableLambda
instead of sub-classing from this interface.
"""
@abstractmethod
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
async def acompress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""Async compress retrieved documents given the query context.
Args:
documents: The retrieved documents.
query: The query context.
callbacks: Optional callbacks to run during compression.
Returns:
The compressed documents.
"""
return await run_in_executor(
None, self.compress_documents, documents, query, callbacks
)
|
"""Init file."""
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.hyperbrowser_web.base import HyperbrowserWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"HyperbrowserWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
|
"""Init file."""
from llama_index.readers.web.async_web.base import (
AsyncWebPageReader,
)
from llama_index.readers.web.beautiful_soup_web.base import (
BeautifulSoupWebReader,
)
from llama_index.readers.web.browserbase_web.base import BrowserbaseWebReader
from llama_index.readers.web.firecrawl_web.base import FireCrawlWebReader
from llama_index.readers.web.knowledge_base.base import (
KnowledgeBaseWebReader,
)
from llama_index.readers.web.main_content_extractor.base import (
MainContentExtractorReader,
)
from llama_index.readers.web.news.base import NewsArticleReader
from llama_index.readers.web.readability_web.base import (
ReadabilityWebPageReader,
)
from llama_index.readers.web.rss.base import (
RssReader,
)
from llama_index.readers.web.rss_news.base import (
RssNewsReader,
)
from llama_index.readers.web.scrapfly_web.base import (
ScrapflyReader,
)
from llama_index.readers.web.simple_web.base import (
SimpleWebPageReader,
)
from llama_index.readers.web.sitemap.base import (
SitemapReader,
)
from llama_index.readers.web.spider_web.base import (
SpiderWebReader,
)
from llama_index.readers.web.trafilatura_web.base import (
TrafilaturaWebReader,
)
from llama_index.readers.web.unstructured_web.base import (
UnstructuredURLLoader,
)
from llama_index.readers.web.whole_site.base import (
WholeSiteReader,
)
from llama_index.readers.web.zyte_web.base import (
ZyteWebReader,
)
__all__ = [
"AsyncWebPageReader",
"BeautifulSoupWebReader",
"BrowserbaseWebReader",
"FireCrawlWebReader",
"KnowledgeBaseWebReader",
"MainContentExtractorReader",
"NewsArticleReader",
"ReadabilityWebPageReader",
"RssReader",
"RssNewsReader",
"ScrapflyReader",
"SimpleWebPageReader",
"SitemapReader",
"SpiderWebReader",
"TrafilaturaWebReader",
"UnstructuredURLLoader",
"WholeSiteReader",
"ZyteWebReader",
]
|
_base_ = './libra-faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask-rcnn_r50_fpn_seesaw-loss_random-ms-2x_lvis-v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from typing import ForwardRef
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (get_origin(x) in (list, tuple, dict, set)) or is_typevar(x) or (type(x) == ForwardRef):
return False
return issubclass(x, a_tuple)
|
from typing import Any, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (get_origin(x) in (list, tuple, dict, set)) or is_typevar(x):
return False
return issubclass(x, a_tuple)
|
from collections import OrderedDict
import pytest
from docarray.array.chunk import ChunkArray
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
class DummyExecutor(Executor):
def __init__(self, mode=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if mode:
self._mode = str(mode)
@requests
def do_something(self, docs, **kwargs):
for doc in docs:
if len(doc.chunks) > 0:
chunks = ChunkArray(
(d for d in doc.chunks if d.modality == self._mode), doc
)
assert chunks[0].content == self._mode
assert len(chunks) == 1
doc.chunks = chunks
class MatchMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
class ChunkMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].chunks.extend(doc.chunks)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
@pytest.mark.timeout(60)
@pytest.mark.parametrize('num_replicas, num_shards', [(1, 1), (2, 2)])
def test_sharding_tail_pod(num_replicas, num_shards):
"""TODO(Maximilian): Make (1, 2) and (2, 1) also workable"""
port = random_port()
f = Flow(port=port).add(
uses=DummyExecutor,
replicas=num_replicas,
shards=num_shards,
uses_after=MatchMerger,
)
with f:
results = Client(port=f.port).post(
on='/search', inputs=Document(matches=[Document()]), return_responses=True
)
assert len(results[0].docs[0].matches) == num_shards
def test_merging_head_pod():
port = random_port()
def multimodal_generator():
for i in range(0, 5):
document = Document()
document.chunks.append(Document(modality='1', content='1'))
document.chunks.append(Document(modality='2', content='2'))
yield document
f = (
Flow(port=port)
.add(uses={'jtype': 'DummyExecutor', 'with': {'mode': '1'}}, name='executor1')
.add(
uses={'jtype': 'DummyExecutor', 'with': {'mode': '2'}},
name='executor2',
needs='gateway',
)
.add(
uses_before=ChunkMerger, name='executor3', needs=['executor1', 'executor2']
)
)
with f:
results = Client(port=f.port).post(
on='/search', inputs=multimodal_generator(), return_responses=True
)
assert len(results[0].docs[0].chunks) == 2
assert len(results[0].docs) == 5
|
from collections import OrderedDict
import pytest
from docarray.array.chunk import ChunkArray
from jina import Client, Document, DocumentArray, Executor, Flow, requests
from jina.helper import random_port
class DummyExecutor(Executor):
def __init__(self, mode=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if mode:
self._mode = str(mode)
@requests
def do_something(self, docs, **kwargs):
for doc in docs:
if len(doc.chunks) > 0:
chunks = ChunkArray(
(d for d in doc.chunks if d.modality == self._mode), doc
)
assert chunks[0].content == self._mode
assert len(chunks) == 1
doc.chunks = chunks
class MatchMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].matches.extend(doc.matches)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
class ChunkMerger(Executor):
@requests
def merge(self, docs_matrix, **kwargs):
results = OrderedDict()
for docs in docs_matrix:
for doc in docs:
if doc.id in results:
results[doc.id].chunks.extend(doc.chunks)
else:
results[doc.id] = doc
return DocumentArray(list(results.values()))
@pytest.mark.timeout(60)
@pytest.mark.parametrize('num_replicas, num_shards', [(1, 1), (2, 2)])
def test_sharding_tail_pod(num_replicas, num_shards):
"""TODO(Maximilian): Make (1, 2) and (2, 1) also workable"""
port = random_port()
f = Flow(port=port).add(
uses=DummyExecutor,
replicas=num_replicas,
shards=num_shards,
uses_after=MatchMerger,
)
with f:
results = Client(port=f.port).post(
on='/search', inputs=Document(matches=[Document()]), return_responses=True
)
assert len(results[0].docs[0].matches) == num_shards
def test_merging_head_pod():
port = random_port()
def multimodal_generator():
for i in range(0, 5):
document = Document()
document.chunks.append(Document(modality='1', content='1'))
document.chunks.append(Document(modality='2', content='2'))
yield document
f = (
Flow(port=port)
.add(uses={'jtype': 'DummyExecutor', 'with': {'mode': '1'}}, name='executor1')
.add(
uses={'jtype': 'DummyExecutor', 'with': {'mode': '2'}},
name='executor2',
needs='gateway',
)
.add(
uses_before=ChunkMerger, name='executor3', needs=['executor1', 'executor2']
)
)
with f:
results = Client(port=f.port).post(
on='/search', inputs=multimodal_generator(), return_responses=True
)
assert len(results[0].docs[0].chunks) == 2
assert len(results[0].docs) == 5
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if data.ndim < 4:
raise ValueError
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._datapoint import Datapoint
class Video(Datapoint):
"""[BETA] :class:`torch.Tensor` subclass for videos.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
if data.ndim < 4:
raise ValueError
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
from typing import List
from llama_index.core.prompts.base import BasePromptTemplate
def get_empty_prompt_txt(prompt: BasePromptTemplate) -> str:
"""
Get empty prompt text.
Substitute empty strings in parts of the prompt that have
not yet been filled out. Skip variables that have already
been partially formatted. This is used to compute the initial tokens.
"""
partial_kargs = prompt.kwargs
empty_kwargs = {v: "" for v in prompt.template_vars if v not in partial_kargs}
all_kwargs = {**partial_kargs, **empty_kwargs}
return prompt.format(llm=None, **all_kwargs)
def get_biggest_prompt(prompts: List[BasePromptTemplate]) -> BasePromptTemplate:
"""
Get biggest prompt.
Oftentimes we need to fetch the biggest prompt, in order to
be the most conservative about chunking text. This
is a helper utility for that.
"""
return max(prompts, key=lambda p: len(get_empty_prompt_txt(p)))
|
from typing import List
from llama_index.core.prompts.base import BasePromptTemplate
def get_empty_prompt_txt(prompt: BasePromptTemplate) -> str:
"""
Get empty prompt text.
Substitute empty strings in parts of the prompt that have
not yet been filled out. Skip variables that have already
been partially formatted. This is used to compute the initial tokens.
"""
partial_kargs = prompt.kwargs
empty_kwargs = {v: "" for v in prompt.template_vars if v not in partial_kargs}
all_kwargs = {**partial_kargs, **empty_kwargs}
return prompt.format(llm=None, **all_kwargs)
def get_biggest_prompt(prompts: List[BasePromptTemplate]) -> BasePromptTemplate:
"""
Get biggest prompt.
Oftentimes we need to fetch the biggest prompt, in order to
be the most conservative about chunking text. This
is a helper utility for that.
"""
empty_prompt_txts = [get_empty_prompt_txt(prompt) for prompt in prompts]
empty_prompt_txt_lens = [len(txt) for txt in empty_prompt_txts]
return prompts[empty_prompt_txt_lens.index(max(empty_prompt_txt_lens))]
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..core import DataIter, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
def check_uneven_sizes(device: str) -> None:
"""Tests for having irregular data shapes."""
batches = [
tm.make_regression(n_samples, 16, use_cupy=device == "cuda")
for n_samples in [512, 256, 1024]
]
unzip = list(zip(*batches))
it = tm.IteratorForTest(unzip[0], unzip[1], None, cache="cache", on_host=True)
Xy = DMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
Xy = ExtMemQuantileDMatrix(it)
assert Xy.num_col() == 16
assert Xy.num_row() == sum(x.shape[0] for x in unzip[0])
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
|
"""Tests related to the `DataIter` interface."""
from typing import Callable, Optional
import numpy as np
from xgboost import testing as tm
from ..core import DataIter, ExtMemQuantileDMatrix, QuantileDMatrix
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
def check_invalid_cat_batches(device: str) -> None:
"""Check error message for inconsistent feature types."""
class _InvalidCatIter(DataIter):
def __init__(self) -> None:
super().__init__(cache_prefix=None)
self._it = 0
def next(self, input_data: Callable) -> bool:
if self._it == 2:
return False
X, y = tm.make_categorical(
64,
12,
4,
onehot=False,
sparsity=0.5,
cat_ratio=1.0 if self._it == 0 else 0.5,
)
if device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
it = _InvalidCatIter()
import pytest
with pytest.raises(ValueError, match="Inconsistent feature types between batches"):
ExtMemQuantileDMatrix(it, enable_categorical=True)
class CatIter(DataIter): # pylint: disable=too-many-instance-attributes
"""An iterator for testing categorical features."""
def __init__( # pylint: disable=too-many-arguments,too-many-locals
self,
n_samples_per_batch: int,
n_features: int,
*,
n_batches: int,
n_cats: int,
sparsity: float,
cat_ratio: float,
onehot: bool,
device: str,
cache: Optional[str],
) -> None:
super().__init__(cache_prefix=cache)
self.n_batches = n_batches
self.device = device
n_samples = n_samples_per_batch * n_batches
cat, y = tm.make_categorical(
n_samples,
n_features,
n_categories=n_cats,
onehot=onehot,
cat_ratio=cat_ratio,
sparsity=sparsity,
)
xs, ys = [], []
prev = 0
for _ in range(n_batches):
n = min(n_samples_per_batch, n_samples - prev)
X = cat.iloc[prev : prev + n, :]
xs.append(X)
ys.append(y[prev : prev + n])
prev += n_samples_per_batch
self.xs = xs
self.ys = ys
self.x = cat
self.y = y
self._it = 0
def xy(self) -> tuple:
"""Return the concatenated data."""
return self.x, self.y
def next(self, input_data: Callable) -> bool:
if self._it == self.n_batches:
return False
X, y = self.xs[self._it], self.ys[self._it]
if self.device == "cuda":
import cudf # pylint: disable=import-error
import cupy # pylint: disable=import-error
X = cudf.DataFrame(X)
y = cupy.array(y)
input_data(data=X, label=y)
self._it += 1
return True
def reset(self) -> None:
self._it = 0
|
"""Parsing utils to go from string to AgentAction or Agent Finish.
AgentAction means that an action should be taken.
This contains the name of the tool to use, the input to pass to that tool,
and a `log` variable (which contains a log of the agent's thinking).
AgentFinish means that a response should be given.
This contains a `return_values` dictionary. This usually contains a
single `output` key, but can be extended to contain more.
This also contains a `log` variable (which contains a log of the agent's thinking).
"""
from langchain.agents.output_parsers.json import JSONAgentOutputParser
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
from langchain.agents.output_parsers.react_json_single_input import (
ReActJsonSingleInputOutputParser,
)
from langchain.agents.output_parsers.react_single_input import (
ReActSingleInputOutputParser,
)
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
__all__ = [
"JSONAgentOutputParser",
"OpenAIFunctionsAgentOutputParser",
"ReActJsonSingleInputOutputParser",
"ReActSingleInputOutputParser",
"SelfAskOutputParser",
"ToolsAgentOutputParser",
"XMLAgentOutputParser",
]
|
"""Parsing utils to go from string to AgentAction or Agent Finish.
AgentAction means that an action should be taken.
This contains the name of the tool to use, the input to pass to that tool,
and a `log` variable (which contains a log of the agent's thinking).
AgentFinish means that a response should be given.
This contains a `return_values` dictionary. This usually contains a
single `output` key, but can be extended to contain more.
This also contains a `log` variable (which contains a log of the agent's thinking).
"""
from langchain.agents.output_parsers.json import JSONAgentOutputParser
from langchain.agents.output_parsers.openai_functions import (
OpenAIFunctionsAgentOutputParser,
)
from langchain.agents.output_parsers.react_json_single_input import (
ReActJsonSingleInputOutputParser,
)
from langchain.agents.output_parsers.react_single_input import (
ReActSingleInputOutputParser,
)
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
from langchain.agents.output_parsers.xml import XMLAgentOutputParser
__all__ = [
"ReActSingleInputOutputParser",
"SelfAskOutputParser",
"ToolsAgentOutputParser",
"ReActJsonSingleInputOutputParser",
"OpenAIFunctionsAgentOutputParser",
"XMLAgentOutputParser",
"JSONAgentOutputParser",
]
|
from llama_index_instrumentation.span_handlers.base import BaseSpanHandler
from llama_index_instrumentation.span_handlers.null import NullSpanHandler
from llama_index_instrumentation.span_handlers.simple import SimpleSpanHandler
__all__ = [
"BaseSpanHandler",
"NullSpanHandler",
"SimpleSpanHandler",
]
|
from llama_index.core.instrumentation.span_handlers.base import BaseSpanHandler
from llama_index.core.instrumentation.span_handlers.null import NullSpanHandler
from llama_index.core.instrumentation.span_handlers.simple import SimpleSpanHandler
__all__ = [
"BaseSpanHandler",
"NullSpanHandler",
"SimpleSpanHandler",
]
|
import importlib
from types import ModuleType
import pytest
from fastapi.exceptions import FastAPIError
from fastapi.testclient import TestClient
from ...utils import needs_py39
@pytest.fixture(
name="mod",
params=[
"tutorial008c",
"tutorial008c_an",
pytest.param("tutorial008c_an_py39", marks=needs_py39),
],
)
def get_mod(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.dependencies.{request.param}")
return mod
def test_get_no_item(mod: ModuleType):
client = TestClient(mod.app)
response = client.get("/items/foo")
assert response.status_code == 404, response.text
assert response.json() == {"detail": "Item not found, there's only a plumbus here"}
def test_get(mod: ModuleType):
client = TestClient(mod.app)
response = client.get("/items/plumbus")
assert response.status_code == 200, response.text
assert response.json() == "plumbus"
def test_fastapi_error(mod: ModuleType):
client = TestClient(mod.app)
with pytest.raises(FastAPIError) as exc_info:
client.get("/items/portal-gun")
assert "No response object was returned" in exc_info.value.args[0]
def test_internal_server_error(mod: ModuleType):
client = TestClient(mod.app, raise_server_exceptions=False)
response = client.get("/items/portal-gun")
assert response.status_code == 500, response.text
assert response.text == "Internal Server Error"
|
import pytest
from fastapi.exceptions import FastAPIError
from fastapi.testclient import TestClient
@pytest.fixture(name="client")
def get_client():
from docs_src.dependencies.tutorial008c import app
client = TestClient(app)
return client
def test_get_no_item(client: TestClient):
response = client.get("/items/foo")
assert response.status_code == 404, response.text
assert response.json() == {"detail": "Item not found, there's only a plumbus here"}
def test_get(client: TestClient):
response = client.get("/items/plumbus")
assert response.status_code == 200, response.text
assert response.json() == "plumbus"
def test_fastapi_error(client: TestClient):
with pytest.raises(FastAPIError) as exc_info:
client.get("/items/portal-gun")
assert "No response object was returned" in exc_info.value.args[0]
def test_internal_server_error():
from docs_src.dependencies.tutorial008c import app
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/items/portal-gun")
assert response.status_code == 500, response.text
assert response.text == "Internal Server Error"
|
# ruff: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
# flake8: noqa
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.saving import load_model
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
@pytest.mark.requires_trainable_backend
def test_masking_with_tensor(self):
model = models.Sequential(
[
layers.Masking(mask_value=ops.convert_to_tensor([0.0])),
layers.LSTM(1),
]
)
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
model(x)
model.save("model.keras")
reload_model = load_model("model.keras")
reload_model(x)
|
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import testing
class MaskingTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_masking_basics(self):
self.run_layer_test(
layers.Masking,
init_kwargs={"mask_value": 0.0},
input_shape=(2, 3, 2),
expected_output_shape=(2, 3, 2),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_masking_correctness(self):
x = np.array(
[
[[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
[[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
]
)
expected_mask = [[False, True, False], [True, False, True]]
layer = layers.Masking(mask_value=0.0)
self.assertAllClose(layer.compute_mask(x), expected_mask)
test_obj = self
class TestLayer(layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
def compute_output_shape(self, input_shape):
return input_shape
def call(self, inputs, mask=None):
assert mask is not None
test_obj.assertAllClose(mask, expected_mask)
return inputs
model = models.Sequential(
[
layers.Masking(mask_value=0.0),
TestLayer(),
]
)
model(x)
|
import gzip
import logging
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, evaluation, losses, models, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For the example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces and as model roberta-base
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from sentence_transformers import SentenceTransformer, LoggingHandler, InputExample
from sentence_transformers import models, util, evaluation, losses
import logging
import os
import gzip
from torch.utils.data import DataLoader
from datetime import datetime
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Some training parameters. For the example, we use a batch_size of 128, a max sentence length (max_seq_length)
# of 32 word pieces and as model roberta-base
model_name = "roberta-base"
batch_size = 128
max_seq_length = 32
num_epochs = 1
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
output_path = "output/askubuntu-simcse-{}-{}-{}".format(
model_name, batch_size, datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
## Now we need a list of train sentences.
## In this example we simply use all sentences that don't appear in the train/dev set
train_sentences = []
for id, sentence in corpus.items():
if id not in dev_test_ids:
train_sentences.append(InputExample(texts=[sentence, sentence]))
logging.info("{} train sentences".format(len(train_sentences)))
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Train the model #################
# As Loss function, we use MultipleNegativesRankingLoss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.MultipleNegativesRankingLoss(model)
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Dev performance before training")
dev_evaluator(model)
warmup_steps = int(num_epochs * len(train_dataloader) * 0.1)
logging.info("Start training")
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
evaluation_steps=100,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=output_path,
show_progress_bar=True,
use_amp=True, # If your GPU does not have FP16 cores, set use_amp=False
)
latest_output_path = output_path + "-latest"
model.save(latest_output_path)
### Run test evaluation on the latest model. This is equivalent to not having a dev dataset
model = SentenceTransformer(latest_output_path)
test_evaluator(model)
|
from typing import List
import pytest
from sqlalchemy import create_engine, text
from llama_index.readers.database import DatabaseReader
from llama_index.core.schema import Document
# --------------------------------------------------------------------------- #
# Fixtures
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="module")
def sqlite_engine():
"""
Return an in-memory SQLite engine using a URI that allows sharing
the database across connections within the same process.
"""
# This URI creates a named in-memory database that persists
# as long as at least one connection is open, and is shareable.
db_uri = (
"sqlite:///file:llamaindex_reader_test_db?mode=memory&cache=shared&uri=true"
)
engine = create_engine(db_uri, future=True)
# Set up schema + sample data (ensure clean state first)
with engine.begin() as conn:
# Drop table if it exists from a previous potentially failed run
conn.execute(text("DROP TABLE IF EXISTS items"))
# Create table (no schema prefix)
conn.execute(
text(
"""
CREATE TABLE items (
id INTEGER PRIMARY KEY,
name TEXT,
value INTEGER
)
"""
)
)
# Insert data (no schema prefix)
conn.execute(
text(
"""
INSERT INTO items (name, value)
VALUES ('foo', 10), ('bar', 20)
"""
)
)
# The engine is now configured with a persistent in-memory DB
# containing the 'items' table.
return engine
# Optional teardown: dispose engine if needed, though usually not required
# engine.dispose()
# --------------------------------------------------------------------------- #
# Helpers
# --------------------------------------------------------------------------- #
def _create_reader(engine):
"""Utility to build a DatabaseReader for the given engine."""
return DatabaseReader(engine=engine)
def _get_all_docs(reader: DatabaseReader, **kwargs) -> List[Document]:
"""Convenience wrapper that returns a list of Documents."""
return reader.load_data(
query="SELECT id, name, value FROM items ORDER BY id",
**kwargs,
)
# --------------------------------------------------------------------------- #
# Tests
# --------------------------------------------------------------------------- #
def test_load_data_basic(sqlite_engine):
"""It should return two Document objects with concatenated text."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(reader)
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
assert docs[0].text_resource.text.endswith("value: 10")
def test_metadata_and_exclusion(sqlite_engine):
"""
`metadata_cols` should be promoted to metadata and
`excluded_text_cols` should remove columns from text.
"""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
metadata_cols=[("id", "item_id"), "value"],
excluded_text_cols=["value"],
)
doc = docs[0]
# `value` excluded from text, included as metadata
assert "value:" not in doc.text
assert doc.metadata == {"item_id": 1, "value": 10}
def test_resource_id_fn(sqlite_engine):
"""Custom `document_id` should drive `doc_id`."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
document_id=lambda row: f"custom-{row['id']}",
)
assert docs[0].id_ == "custom-1"
assert docs[1].id_ == "custom-2"
def test_lazy_load_data_generator(sqlite_engine):
"""`lazy_load_data` should yield Documents lazily."""
reader = _create_reader(sqlite_engine)
gen = reader.lazy_load_data(query="SELECT * FROM items")
docs = list(gen)
assert len(docs) == 2
assert all(hasattr(d, "text_resource") for d in docs)
assert all(hasattr(d.text_resource, "text") for d in docs)
@pytest.mark.asyncio
async def test_aload_data_async(sqlite_engine):
"""`aload_data` wraps the sync loader via asyncio.to_thread()."""
reader = _create_reader(sqlite_engine)
docs = await reader.aload_data(query="SELECT * FROM items")
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
|
from typing import List
import pytest
from sqlalchemy import create_engine, text
from llama_index.readers.database import DatabaseReader
from llama_index.core.schema import Document
# --------------------------------------------------------------------------- #
# Fixtures
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="module")
def sqlite_engine():
"""
Return an in-memory SQLite engine using a URI that allows sharing
the database across connections within the same process.
"""
# This URI creates a named in-memory database that persists
# as long as at least one connection is open, and is shareable.
db_uri = (
"sqlite:///file:llamaindex_reader_test_db?mode=memory&cache=shared&uri=true"
)
engine = create_engine(db_uri, future=True)
# Set up schema + sample data (ensure clean state first)
with engine.begin() as conn:
# Drop table if it exists from a previous potentially failed run
conn.execute(text("DROP TABLE IF EXISTS items"))
# Create table (no schema prefix)
conn.execute(
text(
"""
CREATE TABLE items (
id INTEGER PRIMARY KEY,
name TEXT,
value INTEGER
)
"""
)
)
# Insert data (no schema prefix)
conn.execute(
text(
"""
INSERT INTO items (name, value)
VALUES ('foo', 10), ('bar', 20)
"""
)
)
# The engine is now configured with a persistent in-memory DB
# containing the 'items' table.
return engine
# Optional teardown: dispose engine if needed, though usually not required
# engine.dispose()
# --------------------------------------------------------------------------- #
# Helpers
# --------------------------------------------------------------------------- #
def _create_reader(engine):
"""Utility to build a DatabaseReader for the given engine."""
return DatabaseReader(engine=engine)
def _get_all_docs(reader: DatabaseReader, **kwargs) -> List[Document]:
"""Convenience wrapper that returns a list of Documents."""
return reader.load_data(
query="SELECT id, name, value FROM items ORDER BY id",
**kwargs,
)
# --------------------------------------------------------------------------- #
# Tests
# --------------------------------------------------------------------------- #
def test_load_data_basic(sqlite_engine):
"""It should return two Document objects with concatenated text."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(reader)
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
assert docs[0].text_resource.text.endswith("value: 10")
def test_metadata_and_exclusion(sqlite_engine):
"""
`metadata_cols` should be promoted to metadata and
`excluded_text_cols` should remove columns from text.
"""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
metadata_cols=[("id", "item_id"), "value"],
excluded_text_cols=["value"],
)
doc = docs[0]
# `value` excluded from text, included as metadata
assert "value:" not in doc.text
assert doc.metadata == {"item_id": 1, "value": 10}
def test_resource_id_fn(sqlite_engine):
"""Custom `document_id` should drive `doc_id`."""
reader = _create_reader(sqlite_engine)
docs = _get_all_docs(
reader,
document_id=lambda row: f"custom-{row['id']}",
)
assert docs[0].id_ == "custom-1"
assert docs[1].id_ == "custom-2"
def test_lazy_load_data_generator(sqlite_engine):
"""`lazy_load_data` should yield Documents lazily."""
reader = _create_reader(sqlite_engine)
gen = reader.lazy_load_data(query="SELECT * FROM items")
docs = list(gen)
assert len(docs) == 2
assert all(hasattr(d, "text_resource") for d in docs)
assert all(hasattr(d.text_resource, "text") for d in docs)
@pytest.mark.asyncio()
async def test_aload_data_async(sqlite_engine):
"""`aload_data` wraps the sync loader via asyncio.to_thread()."""
reader = _create_reader(sqlite_engine)
docs = await reader.aload_data(query="SELECT * FROM items")
assert len(docs) == 2
assert docs[0].text_resource and docs[0].text_resource.text
assert docs[0].text_resource.text.startswith("id: 1")
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.4'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on MacOS but seems no effect? must do EXPORT manually before jina start
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}`')
def _set_nofile(nofile_atleast=4096):
"""
Set nofile soft limit to at least 4096, useful for running matlplotlib/seaborn on
parallel executing plot generators vs. Ubuntu default ulimit -n 1024 or OS X El Captian 256
temporary setting extinguishing with Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths as assert_same_paths
from keras.src.tree.tree_api import (
assert_same_structure as assert_same_structure,
)
from keras.src.tree.tree_api import flatten as flatten
from keras.src.tree.tree_api import flatten_with_path as flatten_with_path
from keras.src.tree.tree_api import is_nested as is_nested
from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure as map_shape_structure
from keras.src.tree.tree_api import map_structure as map_structure
from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as
from keras.src.tree.tree_api import traverse as traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer
import torch
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
top_results = torch.topk(similarity_scores, k=top_k)
print("\n\n======================\n\n")
print("Query:", query)
print("\nTop 5 most similar sentences in corpus:")
for score, idx in zip(top_results[0], top_results[1]):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarty + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer, util
import torch
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
cos_scores = util.cos_sim(query_embedding, corpus_embeddings)[0]
top_results = torch.topk(cos_scores, k=top_k)
print("\n\n======================\n\n")
print("Query:", query)
print("\nTop 5 most similar sentences in corpus:")
for score, idx in zip(top_results[0], top_results[1]):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarty + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
import os
from shutil import rmtree
from typing import Callable, Dict, List, Optional
import tqdm
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.utils import get_cache_dir
class BeirEvaluator:
"""
Refer to: https://github.com/beir-cellar/beir for a full list of supported datasets
and a full description of BEIR.
"""
def __init__(self) -> None:
try:
pass
except ImportError:
raise ImportError(
"Please install beir to use this feature: `pip install beir`",
)
def _download_datasets(self, datasets: List[str] = ["nfcorpus"]) -> Dict[str, str]:
from beir import util
cache_dir = get_cache_dir()
dataset_paths = {}
for dataset in datasets:
dataset_full_path = os.path.join(cache_dir, "datasets", "BeIR__" + dataset)
if not os.path.exists(dataset_full_path):
url = f"""https://public.ukp.informatik.tu-darmstadt.de/thakur\
/BEIR/datasets/{dataset}.zip"""
try:
util.download_and_unzip(url, dataset_full_path)
except Exception as e:
print(
"Dataset:", dataset, "not found at:", url, "Removing cached dir"
)
rmtree(dataset_full_path)
raise ValueError(f"invalid BEIR dataset: {dataset}") from e
print("Dataset:", dataset, "downloaded at:", dataset_full_path)
dataset_paths[dataset] = os.path.join(dataset_full_path, dataset)
return dataset_paths
def run(
self,
create_retriever: Callable[[List[Document]], BaseRetriever],
datasets: List[str] = ["nfcorpus"],
metrics_k_values: List[int] = [3, 10],
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
dataset_paths = self._download_datasets(datasets)
for dataset in datasets:
dataset_path = dataset_paths[dataset]
print("Evaluating on dataset:", dataset)
print("-------------------------------------")
corpus, queries, qrels = GenericDataLoader(data_folder=dataset_path).load(
split="test"
)
documents = []
for id, val in corpus.items():
doc = Document(
text=val["text"], metadata={"title": val["title"], "doc_id": id}
)
documents.append(doc)
retriever = create_retriever(documents)
print("Retriever created for: ", dataset)
print("Evaluating retriever on questions against qrels")
results = {}
for key, query in tqdm.tqdm(queries.items()):
nodes_with_score = retriever.retrieve(query)
node_postprocessors = node_postprocessors or []
for node_postprocessor in node_postprocessors:
nodes_with_score = node_postprocessor.postprocess_nodes(
nodes_with_score, query_bundle=QueryBundle(query_str=query)
)
results[key] = {
node.node.metadata["doc_id"]: node.score
for node in nodes_with_score
}
ndcg, map_, recall, precision = EvaluateRetrieval.evaluate(
qrels, results, metrics_k_values
)
print("Results for:", dataset)
for k in metrics_k_values:
print(
{
f"NDCG@{k}": ndcg[f"NDCG@{k}"],
f"MAP@{k}": map_[f"MAP@{k}"],
f"Recall@{k}": recall[f"Recall@{k}"],
f"precision@{k}": precision[f"P@{k}"],
}
)
print("-------------------------------------")
|
import os
from shutil import rmtree
from typing import Callable, Dict, List, Optional
import tqdm
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.utils import get_cache_dir
class BeirEvaluator:
"""
Refer to: https://github.com/beir-cellar/beir for a full list of supported datasets
and a full description of BEIR.
"""
def __init__(self) -> None:
try:
pass
except ImportError:
raise ImportError(
"Please install beir to use this feature: " "`pip install beir`",
)
def _download_datasets(self, datasets: List[str] = ["nfcorpus"]) -> Dict[str, str]:
from beir import util
cache_dir = get_cache_dir()
dataset_paths = {}
for dataset in datasets:
dataset_full_path = os.path.join(cache_dir, "datasets", "BeIR__" + dataset)
if not os.path.exists(dataset_full_path):
url = f"""https://public.ukp.informatik.tu-darmstadt.de/thakur\
/BEIR/datasets/{dataset}.zip"""
try:
util.download_and_unzip(url, dataset_full_path)
except Exception as e:
print(
"Dataset:", dataset, "not found at:", url, "Removing cached dir"
)
rmtree(dataset_full_path)
raise ValueError(f"invalid BEIR dataset: {dataset}") from e
print("Dataset:", dataset, "downloaded at:", dataset_full_path)
dataset_paths[dataset] = os.path.join(dataset_full_path, dataset)
return dataset_paths
def run(
self,
create_retriever: Callable[[List[Document]], BaseRetriever],
datasets: List[str] = ["nfcorpus"],
metrics_k_values: List[int] = [3, 10],
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
dataset_paths = self._download_datasets(datasets)
for dataset in datasets:
dataset_path = dataset_paths[dataset]
print("Evaluating on dataset:", dataset)
print("-------------------------------------")
corpus, queries, qrels = GenericDataLoader(data_folder=dataset_path).load(
split="test"
)
documents = []
for id, val in corpus.items():
doc = Document(
text=val["text"], metadata={"title": val["title"], "doc_id": id}
)
documents.append(doc)
retriever = create_retriever(documents)
print("Retriever created for: ", dataset)
print("Evaluating retriever on questions against qrels")
results = {}
for key, query in tqdm.tqdm(queries.items()):
nodes_with_score = retriever.retrieve(query)
node_postprocessors = node_postprocessors or []
for node_postprocessor in node_postprocessors:
nodes_with_score = node_postprocessor.postprocess_nodes(
nodes_with_score, query_bundle=QueryBundle(query_str=query)
)
results[key] = {
node.node.metadata["doc_id"]: node.score
for node in nodes_with_score
}
ndcg, map_, recall, precision = EvaluateRetrieval.evaluate(
qrels, results, metrics_k_values
)
print("Results for:", dataset)
for k in metrics_k_values:
print(
{
f"NDCG@{k}": ndcg[f"NDCG@{k}"],
f"MAP@{k}": map_[f"MAP@{k}"],
f"Recall@{k}": recall[f"Recall@{k}"],
f"precision@{k}": precision[f"P@{k}"],
}
)
print("-------------------------------------")
|
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomContrast")
class RandomContrast(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images
by a random factor. Contrast is adjusted independently
for each channel of each image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound.
When represented as a single float, lower = upper.
The contrast factor will be randomly picked between
`[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean`
where `mean` is the mean value of the channel.
seed: Integer. Used to create a random seed.
"""
_FACTOR_BOUNDS = (0, 1)
def __init__(self, factor, seed=None, **kwargs):
super().__init__(**kwargs)
self._set_factor(factor)
self.seed = seed
self.generator = SeedGenerator(seed)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
factor_shape = (1, 1, 1)
elif rank == 4:
# Keep only the batch dim. This will ensure to have same adjustment
# with in one image, but different across the images.
factor_shape = [images_shape[0], 1, 1, 1]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
if not training:
return {"contrast_factor": self.backend.numpy.zeros(factor_shape)}
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
factor = self.backend.random.uniform(
shape=factor_shape,
minval=1.0 - self.factor[0],
maxval=1.0 + self.factor[1],
seed=seed,
dtype=self.compute_dtype,
)
return {"contrast_factor": factor}
def transform_images(self, images, transformation, training=True):
if training:
constrast_factor = transformation["contrast_factor"]
outputs = self._adjust_constrast(images, constrast_factor)
outputs = self.backend.numpy.clip(outputs, 0, 255)
self.backend.numpy.reshape(outputs, self.backend.shape(images))
return outputs
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def _adjust_constrast(self, inputs, contrast_factor):
if self.data_format == "channels_first":
height_axis = -2
width_axis = -1
else:
height_axis = -3
width_axis = -2
# reduce mean on height
inp_mean = self.backend.numpy.mean(
inputs, axis=height_axis, keepdims=True
)
# reduce mean on width
inp_mean = self.backend.numpy.mean(
inp_mean, axis=width_axis, keepdims=True
)
outputs = (inputs - inp_mean) * contrast_factor + inp_mean
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.base_image_preprocessing_layer import ( # noqa: E501
BaseImagePreprocessingLayer,
)
from keras.src.random.seed_generator import SeedGenerator
@keras_export("keras.layers.RandomContrast")
class RandomContrast(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly adjusts contrast during training.
This layer will randomly adjust the contrast of an image or images
by a random factor. Contrast is adjusted independently
for each channel of each image during training.
For each channel, this layer computes the mean of the image pixels in the
channel and then adjusts each component `x` of each pixel to
`(x - mean) * contrast_factor + mean`.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
in integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
factor: a positive float represented as fraction of value, or a tuple of
size 2 representing lower and upper bound.
When represented as a single float, lower = upper.
The contrast factor will be randomly picked between
`[1.0 - lower, 1.0 + upper]`. For any pixel x in the channel,
the output will be `(x - mean) * factor + mean`
where `mean` is the mean value of the channel.
seed: Integer. Used to create a random seed.
"""
_FACTOR_BOUNDS = (0, 1)
def __init__(self, factor, seed=None, **kwargs):
super().__init__(**kwargs)
self._set_factor(factor)
self.seed = seed
self.generator = SeedGenerator(seed)
def get_random_transformation(self, data, training=True, seed=None):
if isinstance(data, dict):
images = data["images"]
else:
images = data
images_shape = self.backend.shape(images)
rank = len(images_shape)
if rank == 3:
factor_shape = (1, 1, 1)
elif rank == 4:
# Keep only the batch dim. This will ensure to have same adjustment
# with in one image, but different across the images.
factor_shape = [images_shape[0], 1, 1, 1]
else:
raise ValueError(
"Expected the input image to be rank 3 or 4. Received "
f"inputs.shape={images_shape}"
)
if not training:
return {"contrast_factor": self.backend.numpy.zeros(factor_shape)}
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
factor = self.backend.random.uniform(
shape=factor_shape,
minval=1.0 - self.factor[0],
maxval=1.0 + self.factor[1],
seed=seed,
dtype=self.compute_dtype,
)
return {"contrast_factor": factor}
def augment_images(self, images, transformation, training=True):
if training:
constrast_factor = transformation["contrast_factor"]
outputs = self._adjust_constrast(images, constrast_factor)
outputs = self.backend.numpy.clip(outputs, 0, 255)
self.backend.numpy.reshape(outputs, self.backend.shape(images))
return outputs
return images
def augment_labels(self, labels, transformation, training=True):
return labels
def augment_bounding_boxes(
self, bounding_boxes, transformation, training=True
):
return bounding_boxes
def augment_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return segmentation_masks
def _adjust_constrast(self, inputs, contrast_factor):
if self.data_format == "channels_first":
height_axis = -2
width_axis = -1
else:
height_axis = -3
width_axis = -2
# reduce mean on height
inp_mean = self.backend.numpy.mean(
inputs, axis=height_axis, keepdims=True
)
# reduce mean on width
inp_mean = self.backend.numpy.mean(
inp_mean, axis=width_axis, keepdims=True
)
outputs = (inputs - inp_mean) * contrast_factor + inp_mean
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
"factor": self.factor,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
return len(text) / len(zlib.compress(text.encode("utf-8")))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
return len(text) / len(zlib.compress(text.encode("utf-8")))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
from typing import List, Optional
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.storage.index_store.types import BaseIndexStore
from llama_index.core.storage.index_store.utils import (
index_struct_to_json,
json_to_index_struct,
)
from llama_index.core.storage.kvstore.types import BaseKVStore
DEFAULT_NAMESPACE = "index_store"
DEFAULT_COLLECTION_SUFFIX = "/data"
class KVIndexStore(BaseIndexStore):
"""
Key-Value Index store.
Args:
kvstore (BaseKVStore): key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
def __init__(
self,
kvstore: BaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a KVIndexStore."""
self._kvstore = kvstore
self._namespace = namespace or DEFAULT_NAMESPACE
self._collection_suffix = collection_suffix or DEFAULT_COLLECTION_SUFFIX
self._collection = f"{self._namespace}{self._collection_suffix}"
def add_index_struct(self, index_struct: IndexStruct) -> None:
"""
Add an index struct.
Args:
index_struct (IndexStruct): index struct
"""
key = index_struct.index_id
data = index_struct_to_json(index_struct)
self._kvstore.put(key, data, collection=self._collection)
def delete_index_struct(self, key: str) -> None:
"""
Delete an index struct.
Args:
key (str): index struct key
"""
self._kvstore.delete(key, collection=self._collection)
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
"""
Get an index struct.
Args:
struct_id (Optional[str]): index struct id
"""
if struct_id is None:
structs = self.index_structs()
assert len(structs) == 1
return structs[0]
else:
json = self._kvstore.get(struct_id, collection=self._collection)
if json is None:
return None
return json_to_index_struct(json)
def index_structs(self) -> List[IndexStruct]:
"""
Get all index structs.
Returns:
List[IndexStruct]: index structs
"""
jsons = self._kvstore.get_all(collection=self._collection)
return [json_to_index_struct(json) for json in jsons.values()]
|
from typing import List, Optional
from llama_index.core.data_structs.data_structs import IndexStruct
from llama_index.core.storage.index_store.types import BaseIndexStore
from llama_index.core.storage.index_store.utils import (
index_struct_to_json,
json_to_index_struct,
)
from llama_index.core.storage.kvstore.types import BaseKVStore
DEFAULT_NAMESPACE = "index_store"
DEFAULT_COLLECTION_SUFFIX = "/data"
class KVIndexStore(BaseIndexStore):
"""Key-Value Index store.
Args:
kvstore (BaseKVStore): key-value store
namespace (str): namespace for the index store
collection_suffix (str): suffix for the collection name
"""
def __init__(
self,
kvstore: BaseKVStore,
namespace: Optional[str] = None,
collection_suffix: Optional[str] = None,
) -> None:
"""Init a KVIndexStore."""
self._kvstore = kvstore
self._namespace = namespace or DEFAULT_NAMESPACE
self._collection_suffix = collection_suffix or DEFAULT_COLLECTION_SUFFIX
self._collection = f"{self._namespace}{self._collection_suffix}"
def add_index_struct(self, index_struct: IndexStruct) -> None:
"""Add an index struct.
Args:
index_struct (IndexStruct): index struct
"""
key = index_struct.index_id
data = index_struct_to_json(index_struct)
self._kvstore.put(key, data, collection=self._collection)
def delete_index_struct(self, key: str) -> None:
"""Delete an index struct.
Args:
key (str): index struct key
"""
self._kvstore.delete(key, collection=self._collection)
def get_index_struct(
self, struct_id: Optional[str] = None
) -> Optional[IndexStruct]:
"""Get an index struct.
Args:
struct_id (Optional[str]): index struct id
"""
if struct_id is None:
structs = self.index_structs()
assert len(structs) == 1
return structs[0]
else:
json = self._kvstore.get(struct_id, collection=self._collection)
if json is None:
return None
return json_to_index_struct(json)
def index_structs(self) -> List[IndexStruct]:
"""Get all index structs.
Returns:
List[IndexStruct]: index structs
"""
jsons = self._kvstore.get_all(collection=self._collection)
return [json_to_index_struct(json) for json in jsons.values()]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs.graph_document import (
GraphDocument,
Node,
Relationship,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Node": "langchain_community.graphs.graph_document",
"Relationship": "langchain_community.graphs.graph_document",
"GraphDocument": "langchain_community.graphs.graph_document",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"GraphDocument",
"Node",
"Relationship",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.graphs.graph_document import (
GraphDocument,
Node,
Relationship,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Node": "langchain_community.graphs.graph_document",
"Relationship": "langchain_community.graphs.graph_document",
"GraphDocument": "langchain_community.graphs.graph_document",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Node",
"Relationship",
"GraphDocument",
]
|
from typing import List, _LiteralGenericAlias, get_args, Tuple
import kuzu
Triple = Tuple[str, str, str]
def create_fresh_database(db: str) -> None:
"""
Create a new Kùzu database by removing existing database directory and its contents.
"""
import shutil
shutil.rmtree(db, ignore_errors=True)
def get_list_from_literal(literal: _LiteralGenericAlias) -> List[str]:
"""
Get a list of strings from a Literal type.
Parameters
----------
literal (_LiteralGenericAlias): The Literal type from which to extract the strings.
Returns
-------
List[str]: A list of strings extracted from the Literal type.
"""
if not isinstance(literal, _LiteralGenericAlias):
raise TypeError(
f"{literal} must be a Literal type.\nTry using typing.Literal{literal}."
)
return list(get_args(literal))
def remove_empty_values(input_dict):
"""
Remove entries with empty values from the dictionary.
Parameters
----------
input_dict (dict): The dictionary from which empty values need to be removed.
Returns
-------
dict: A new dictionary with all empty values removed.
"""
# Create a new dictionary excluding empty values and remove the `e.` prefix from the keys
return {key.replace("e.", ""): value for key, value in input_dict.items() if value}
def get_filtered_props(records: dict, filter_list: List[str]) -> dict:
return {k: v for k, v in records.items() if k not in filter_list}
# Lookup entry by middle value of tuple
def lookup_relation(relation: str, triples: List[Triple]) -> Triple:
"""
Look up a triple in a list of triples by the middle value.
"""
for triple in triples:
if triple[1] == relation:
return triple
return None
def create_chunk_node_table(connection: kuzu.Connection) -> None:
# For now, the additional `properties` dict from LlamaIndex is stored as a string
# TODO: See if it makes sense to add better support for property metadata as columns
connection.execute(
f"""
CREATE NODE TABLE IF NOT EXISTS Chunk (
id STRING,
text STRING,
label STRING,
embedding DOUBLE[],
creation_date DATE,
last_modified_date DATE,
file_name STRING,
file_path STRING,
file_size INT64,
file_type STRING,
ref_doc_id STRING,
PRIMARY KEY(id)
)
"""
)
def create_entity_node_tables(connection: kuzu.Connection, entities: List[str]) -> None:
for tbl_name in entities:
# For now, the additional `properties` dict from LlamaIndex is stored as a string
# TODO: See if it makes sense to add better support for property metadata as columns
connection.execute(
f"""
CREATE NODE TABLE IF NOT EXISTS {tbl_name} (
id STRING,
name STRING,
label STRING,
embedding DOUBLE[],
creation_date DATE,
last_modified_date DATE,
file_name STRING,
file_path STRING,
file_size INT64,
file_type STRING,
triplet_source_id STRING,
PRIMARY KEY(id)
)
"""
)
def create_entity_relationship_table(
connection: kuzu.Connection, label: str, src_id: str, dst_id: str
) -> None:
connection.execute(
f"""
CREATE REL TABLE IF NOT EXISTS {label} (
FROM {src_id} TO {dst_id},
label STRING,
triplet_source_id STRING
);
"""
)
def create_relation_tables(
connection: kuzu.Connection, entities: List[str], relationship_schema: List[Triple]
) -> None:
# Create relationship tables for each entity
for src, rel_label, dst in relationship_schema:
create_entity_relationship_table(connection, rel_label, src, dst)
ddl = "CREATE REL TABLE IF NOT EXISTS MENTIONS ("
table_names = []
for entity in entities:
table_names.append(f"FROM Chunk TO {entity}")
table_names = list(set(table_names))
ddl += ", ".join(table_names)
# Add common properties for all the tables here
ddl += ", label STRING, triplet_source_id STRING)"
if ddl:
connection.execute(ddl)
|
from typing import List, _LiteralGenericAlias, get_args, Tuple
import kuzu
Triple = Tuple[str, str, str]
def create_fresh_database(db: str) -> None:
"""
Create a new Kùzu database by removing existing database directory and its contents.
"""
import shutil
shutil.rmtree(db, ignore_errors=True)
def get_list_from_literal(literal: _LiteralGenericAlias) -> List[str]:
"""
Get a list of strings from a Literal type.
Parameters:
literal (_LiteralGenericAlias): The Literal type from which to extract the strings.
Returns:
List[str]: A list of strings extracted from the Literal type.
"""
if not isinstance(literal, _LiteralGenericAlias):
raise TypeError(
f"{literal} must be a Literal type.\nTry using typing.Literal{literal}."
)
return list(get_args(literal))
def remove_empty_values(input_dict):
"""
Remove entries with empty values from the dictionary.
Parameters:
input_dict (dict): The dictionary from which empty values need to be removed.
Returns:
dict: A new dictionary with all empty values removed.
"""
# Create a new dictionary excluding empty values and remove the `e.` prefix from the keys
return {key.replace("e.", ""): value for key, value in input_dict.items() if value}
def get_filtered_props(records: dict, filter_list: List[str]) -> dict:
return {k: v for k, v in records.items() if k not in filter_list}
# Lookup entry by middle value of tuple
def lookup_relation(relation: str, triples: List[Triple]) -> Triple:
"""
Look up a triple in a list of triples by the middle value.
"""
for triple in triples:
if triple[1] == relation:
return triple
return None
def create_chunk_node_table(connection: kuzu.Connection) -> None:
# For now, the additional `properties` dict from LlamaIndex is stored as a string
# TODO: See if it makes sense to add better support for property metadata as columns
connection.execute(
f"""
CREATE NODE TABLE IF NOT EXISTS Chunk (
id STRING,
text STRING,
label STRING,
embedding DOUBLE[],
creation_date DATE,
last_modified_date DATE,
file_name STRING,
file_path STRING,
file_size INT64,
file_type STRING,
ref_doc_id STRING,
PRIMARY KEY(id)
)
"""
)
def create_entity_node_tables(connection: kuzu.Connection, entities: List[str]) -> None:
for tbl_name in entities:
# For now, the additional `properties` dict from LlamaIndex is stored as a string
# TODO: See if it makes sense to add better support for property metadata as columns
connection.execute(
f"""
CREATE NODE TABLE IF NOT EXISTS {tbl_name} (
id STRING,
name STRING,
label STRING,
embedding DOUBLE[],
creation_date DATE,
last_modified_date DATE,
file_name STRING,
file_path STRING,
file_size INT64,
file_type STRING,
triplet_source_id STRING,
PRIMARY KEY(id)
)
"""
)
def create_entity_relationship_table(
connection: kuzu.Connection, label: str, src_id: str, dst_id: str
) -> None:
connection.execute(
f"""
CREATE REL TABLE IF NOT EXISTS {label} (
FROM {src_id} TO {dst_id},
label STRING,
triplet_source_id STRING
);
"""
)
def create_relation_tables(
connection: kuzu.Connection, entities: List[str], relationship_schema: List[Triple]
) -> None:
# Create relationship tables for each entity
for src, rel_label, dst in relationship_schema:
create_entity_relationship_table(connection, rel_label, src, dst)
ddl = "CREATE REL TABLE IF NOT EXISTS MENTIONS ("
table_names = []
for entity in entities:
table_names.append(f"FROM Chunk TO {entity}")
table_names = list(set(table_names))
ddl += ", ".join(table_names)
# Add common properties for all the tables here
ddl += ", label STRING, triplet_source_id STRING)"
if ddl:
connection.execute(ddl)
|
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return str(Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
# CREDITS: https://github.com/openai/CLIP
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.getcwd(), '.cache', 'bpe_simple_vocab_16e6.txt.gz')
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
|
"""Init file of LlamaIndex."""
__version__ = "0.12.27"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Init file of LlamaIndex."""
__version__ = "0.12.26"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
|
"""Retriever tool."""
from typing import TYPE_CHECKING, Any, List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import LlamaIndexTool
from llama_index.core.schema import (
MetadataMode,
Node,
NodeWithScore,
QueryBundle,
TextNode,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
from llama_index.core.postprocessor.types import BaseNodePostprocessor
DEFAULT_NAME = "retriever_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and retrieving a set of relevant documents.
"""
class RetrieverTool(AsyncBaseTool):
"""Retriever tool.
A tool making use of a retriever.
Args:
retriever (BaseRetriever): A retriever.
metadata (ToolMetadata): The associated metadata of the query engine.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): A list of
node postprocessors.
"""
def __init__(
self,
retriever: BaseRetriever,
metadata: ToolMetadata,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
) -> None:
self._retriever = retriever
self._metadata = metadata
self._node_postprocessors = node_postprocessors or []
@classmethod
def from_defaults(
cls,
retriever: BaseRetriever,
node_postprocessors: Optional[List[BaseNodePostprocessor]] = None,
name: Optional[str] = None,
description: Optional[str] = None,
) -> "RetrieverTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
retriever=retriever,
metadata=metadata,
node_postprocessors=node_postprocessors,
)
@property
def retriever(self) -> BaseRetriever:
return self._retriever
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = self._retriever.retrieve(query_str)
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
content = ""
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = ""
if args is not None:
query_str += ", ".join([str(arg) for arg in args]) + "\n"
if kwargs is not None:
query_str += (
", ".join([f"{k!s} is {v!s}" for k, v in kwargs.items()]) + "\n"
)
if query_str == "":
raise ValueError("Cannot call query engine without inputs")
docs = await self._retriever.aretrieve(query_str)
content = ""
docs = self._apply_node_postprocessors(docs, QueryBundle(query_str))
for doc in docs:
assert isinstance(doc.node, (Node, TextNode))
node_copy = doc.node.model_copy()
node_copy.text_template = "{metadata_str}\n{content}"
node_copy.metadata_template = "{key} = {value}"
content += node_copy.get_content(MetadataMode.LLM) + "\n\n"
return ToolOutput(
content=content,
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=docs,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
raise NotImplementedError("`as_langchain_tool` not implemented here.")
def _apply_node_postprocessors(
self, nodes: List[NodeWithScore], query_bundle: QueryBundle
) -> List[NodeWithScore]:
for node_postprocessor in self._node_postprocessors:
nodes = node_postprocessor.postprocess_nodes(
nodes, query_bundle=query_bundle
)
return nodes
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .log_processor import LogProcessor
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .priority import Priority, get_priority
from .runner import Runner
from .utils import set_random_seed
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'get_priority', 'Priority', 'find_latest_checkpoint',
'autocast', 'LogProcessor', 'set_random_seed'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp import autocast
from .base_loop import BaseLoop
from .checkpoint import (CheckpointLoader, find_latest_checkpoint,
get_deprecated_model_names, get_external_models,
get_mmcls_models, get_state_dict,
get_torchvision_models, load_checkpoint,
load_state_dict, save_checkpoint, weights_to_cpu)
from .log_processor import LogProcessor
from .loops import EpochBasedTrainLoop, IterBasedTrainLoop, TestLoop, ValLoop
from .priority import Priority, get_priority
from .runner import Runner
__all__ = [
'BaseLoop', 'load_state_dict', 'get_torchvision_models',
'get_external_models', 'get_mmcls_models', 'get_deprecated_model_names',
'CheckpointLoader', 'load_checkpoint', 'weights_to_cpu', 'get_state_dict',
'save_checkpoint', 'EpochBasedTrainLoop', 'IterBasedTrainLoop', 'ValLoop',
'TestLoop', 'Runner', 'get_priority', 'Priority', 'find_latest_checkpoint',
'autocast', 'LogProcessor'
]
|
_base_ = './cascade-rcnn_hrnetv2p-w32-20e_coco.py'
# model settings
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
_base_ = './cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# model settings
model = dict(
backbone=dict(
extra=dict(
stage2=dict(num_channels=(18, 36)),
stage3=dict(num_channels=(18, 36, 72)),
stage4=dict(num_channels=(18, 36, 72, 144))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w18')),
neck=dict(type='HRFPN', in_channels=[18, 36, 72, 144], out_channels=256))
|
# training schedule for 20e
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=20, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
# training schedule for 20e
train_cfg = dict(by_epoch=True, max_epochs=20)
val_cfg = dict(interval=1)
test_cfg = dict()
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=20,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
|
import logging
import traceback
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderNanoBEIREvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
def main():
model_name = "answerdotai/ModernBERT-base"
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 32
num_epochs = 1
# 1. Define our CrossEncoder model
model = CrossEncoder(model_name)
print("Model max length:", model.max_length)
print("Model num labels:", model.num_labels)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/microsoft/ms_marco
logging.info("Read train dataset")
dataset = load_dataset("microsoft/ms_marco", "v1.1", split="train")
def bce_mapper(batch):
queries = []
passages = []
labels = []
for query, passages_info in zip(batch["query"], batch["passages"]):
for idx, is_selected in enumerate(passages_info["is_selected"]):
queries.append(query)
passages.append(passages_info["passage_text"][idx])
labels.append(is_selected)
return {"query": queries, "passage": passages, "label": labels}
dataset = dataset.map(bce_mapper, batched=True, remove_columns=dataset.column_names)
dataset = dataset.train_test_split(test_size=10_000)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
logging.info(train_dataset)
# 3. Define our training loss
loss = BinaryCrossEntropyLoss(model)
# 4. Define the evaluator. We use the CrossEncoderNanoBEIREvaluator, which is a light-weight evaluator for English reranking
evaluator = CrossEncoderNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size)
evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-msmarco-v1.1-{short_model_name}-bce"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.BATCH_SAMPLER,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_R100_mean_ndcg@10",
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=4_000,
save_strategy="steps",
save_steps=4_000,
save_total_limit=2,
logging_steps=1_000,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=12,
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, useful to include these in the model card
evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
import logging
import traceback
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderNanoBEIREvaluator
from sentence_transformers.cross_encoder.losses.BinaryCrossEntropyLoss import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
def main():
model_name = "answerdotai/ModernBERT-base"
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 32
num_epochs = 1
# 1. Define our CrossEncoder model
model = CrossEncoder(model_name)
print("Model max length:", model.max_length)
print("Model num labels:", model.num_labels)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/microsoft/ms_marco
logging.info("Read train dataset")
dataset = load_dataset("microsoft/ms_marco", "v1.1", split="train")
def bce_mapper(batch):
queries = []
passages = []
labels = []
for query, passages_info in zip(batch["query"], batch["passages"]):
for idx, is_selected in enumerate(passages_info["is_selected"]):
queries.append(query)
passages.append(passages_info["passage_text"][idx])
labels.append(is_selected)
return {"query": queries, "passage": passages, "label": labels}
dataset = dataset.map(bce_mapper, batched=True, remove_columns=dataset.column_names)
dataset = dataset.train_test_split(test_size=10_000)
train_dataset = dataset["train"]
eval_dataset = dataset["test"]
logging.info(train_dataset)
# 3. Define our training loss
loss = BinaryCrossEntropyLoss(model)
# 4. Define the evaluator. We use the CrossEncoderNanoBEIREvaluator, which is a light-weight evaluator for English reranking
evaluator = CrossEncoderNanoBEIREvaluator(dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size)
evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-msmarco-v1.1-{short_model_name}-bce"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=2e-5,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.BATCH_SAMPLER,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_R100_mean_ndcg@10",
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=40_000,
save_strategy="steps",
save_steps=40_000,
save_total_limit=2,
logging_steps=10_000,
logging_first_step=True,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=12,
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, useful to include these in the model card
evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
import multiprocessing
import socket
import sys
import time
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
if sys.platform.startswith("win"):
pytest.skip("Skipping collective tests on Windows", allow_module_level=True)
def run_rabit_worker(rabit_env, world_size):
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_rabit_communicator():
world_size = 2
tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size)
tracker.start(world_size)
workers = []
for _ in range(world_size):
worker = multiprocessing.Process(target=run_rabit_worker,
args=(tracker.worker_envs(), world_size))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
def run_federated_worker(port, world_size, rank):
with xgb.collective.CommunicatorContext(xgboost_communicator='federated',
federated_server_address=f'localhost:{port}',
federated_world_size=world_size,
federated_rank=rank):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f'rank{rank}'
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_federated_communicator():
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
server = multiprocessing.Process(target=xgb.federated.run_federated_server, args=(port, world_size))
server.start()
time.sleep(1)
if not server.is_alive():
raise Exception("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = multiprocessing.Process(target=run_federated_worker,
args=(port, world_size, rank))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
server.terminate()
|
import multiprocessing
import socket
import sys
import time
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
if sys.platform.startswith("win"):
pytest.skip("Skipping collective tests on Windows", allow_module_level=True)
def run_rabit_worker(rabit_env, world_size):
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_rabit_communicator():
world_size = 2
tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size)
tracker.start(world_size)
workers = []
for _ in range(world_size):
worker = multiprocessing.Process(target=run_rabit_worker,
args=(tracker.worker_envs(), world_size))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
# TODO(rongou): remove this once we remove the rabit api.
def run_rabit_api_worker(rabit_env, world_size):
with xgb.rabit.RabitContext(rabit_env):
assert xgb.rabit.get_world_size() == world_size
assert xgb.rabit.is_distributed()
assert xgb.rabit.get_processor_name().decode() == socket.gethostname()
ret = xgb.rabit.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.rabit.allreduce(np.asarray([1, 2, 3]), xgb.rabit.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
# TODO(rongou): remove this once we remove the rabit api.
def test_rabit_api():
world_size = 2
tracker = RabitTracker(host_ip='127.0.0.1', n_workers=world_size)
tracker.start(world_size)
rabit_env = []
for k, v in tracker.worker_envs().items():
rabit_env.append(f"{k}={v}".encode())
workers = []
for _ in range(world_size):
worker = multiprocessing.Process(target=run_rabit_api_worker,
args=(rabit_env, world_size))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
def run_federated_worker(port, world_size, rank):
with xgb.collective.CommunicatorContext(xgboost_communicator='federated',
federated_server_address=f'localhost:{port}',
federated_world_size=world_size,
federated_rank=rank):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f'rank{rank}'
ret = xgb.collective.broadcast('test1234', 0)
assert str(ret) == 'test1234'
ret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(ret, np.asarray([2, 4, 6]))
def test_federated_communicator():
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
server = multiprocessing.Process(target=xgb.federated.run_federated_server, args=(port, world_size))
server.start()
time.sleep(1)
if not server.is_alive():
raise Exception("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = multiprocessing.Process(target=run_federated_worker,
args=(port, world_size, rank))
workers.append(worker)
worker.start()
for worker in workers:
worker.join()
assert worker.exitcode == 0
server.terminate()
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa
model = dict(
type='KnowledgeDistillationSingleStageDetector',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
teacher_config='configs/gfl/gfl_r101_fpn_ms-2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LDHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
loss_ld=dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth' # noqa
model = dict(
type='KnowledgeDistillationSingleStageDetector',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
teacher_config='configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py',
teacher_ckpt=teacher_ckpt,
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LDHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
loss_ld=dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=0.25, T=10),
reg_max=16,
loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
from datetime import datetime
from typing import Any, List
from backend.blocks.exa._auth import (
ExaCredentials,
ExaCredentialsField,
ExaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from .helpers import ContentSettings
class ExaFindSimilarBlock(Block):
class Input(BlockSchema):
credentials: ExaCredentialsInput = ExaCredentialsField()
url: str = SchemaField(
description="The url for which you would like to find similar links"
)
number_of_results: int = SchemaField(
description="Number of results to return",
default=10,
advanced=True,
)
include_domains: List[str] = SchemaField(
description="Domains to include in search",
default_factory=list,
advanced=True,
)
exclude_domains: List[str] = SchemaField(
description="Domains to exclude from search",
default_factory=list,
advanced=True,
)
start_crawl_date: datetime = SchemaField(
description="Start date for crawled content",
)
end_crawl_date: datetime = SchemaField(
description="End date for crawled content",
)
start_published_date: datetime = SchemaField(
description="Start date for published content",
)
end_published_date: datetime = SchemaField(
description="End date for published content",
)
include_text: List[str] = SchemaField(
description="Text patterns to include (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
exclude_text: List[str] = SchemaField(
description="Text patterns to exclude (max 1 string, up to 5 words)",
default_factory=list,
advanced=True,
)
contents: ContentSettings = SchemaField(
description="Content retrieval settings",
default=ContentSettings(),
advanced=True,
)
class Output(BlockSchema):
results: List[Any] = SchemaField(
description="List of similar documents with title, URL, published date, author, and score",
default_factory=list,
)
error: str = SchemaField(description="Error message if the request failed")
def __init__(self):
super().__init__(
id="5e7315d1-af61-4a0c-9350-7c868fa7438a",
description="Finds similar links using Exa's findSimilar API",
categories={BlockCategory.SEARCH},
input_schema=ExaFindSimilarBlock.Input,
output_schema=ExaFindSimilarBlock.Output,
)
def run(
self, input_data: Input, *, credentials: ExaCredentials, **kwargs
) -> BlockOutput:
url = "https://api.exa.ai/findSimilar"
headers = {
"Content-Type": "application/json",
"x-api-key": credentials.api_key.get_secret_value(),
}
payload = {
"url": input_data.url,
"numResults": input_data.number_of_results,
"contents": input_data.contents.dict(),
}
optional_field_mapping = {
"include_domains": "includeDomains",
"exclude_domains": "excludeDomains",
"include_text": "includeText",
"exclude_text": "excludeText",
}
# Add optional fields if they have values
for input_field, api_field in optional_field_mapping.items():
value = getattr(input_data, input_field)
if value: # Only add non-empty values
payload[api_field] = value
date_field_mapping = {
"start_crawl_date": "startCrawlDate",
"end_crawl_date": "endCrawlDate",
"start_published_date": "startPublishedDate",
"end_published_date": "endPublishedDate",
}
# Add dates if they exist
for input_field, api_field in date_field_mapping.items():
value = getattr(input_data, input_field, None)
if value:
payload[api_field] = value.strftime("%Y-%m-%dT%H:%M:%S.000Z")
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
yield "results", data.get("results", [])
except Exception as e:
yield "error", str(e)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", ["plain_text"]),
("acronym_identification", ["default"]),
("lhoestq/squad", ["plain_text"]),
("lhoestq/test", ["default"]),
("lhoestq/demo1", ["default"]),
("dalle-mini/wit", ["default"]),
("datasets-maintainers/audiofolder_no_configs_in_metadata", ["default"]),
("datasets-maintainers/audiofolder_single_config_in_metadata", ["custom"]),
("datasets-maintainers/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_faillure = True
traceback.print_exc()
sys.exit(1 if has_failure else 0)
|
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
SourceFileLoader("x", file).load_module()
except Exception:
has_faillure = True
print(file)
traceback.print_exc()
print()
sys.exit(1 if has_failure else 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
gamma (int): Use a larger momentum early in training and gradually
annealing to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
momentum (float): The momentum used for updating ema parameter.
Ema's parameter are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
gamma (int): Use a larger momentum early in training and gradually
annealing to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
soft_shrink,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
tanh_shrink,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
hard_shrink,
linear,
mish,
log_softmax,
log_sigmoid,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: Tensor
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
assert isinstance(value, doc._get_nested_document_class(field))
|
import numpy as np
import torch
from docarray import Document
from docarray.document import AnyDocument
from docarray.typing import AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
def test_proto_all_types():
class Mymmdoc(Document):
tensor: Tensor
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
assert isinstance(value, doc._get_nested_document_class(field))
|
"""Auto Merging Retriever."""
from typing import Any, Dict, List
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import (
HierarchicalNodeParser,
get_leaf_nodes,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers.auto_merging_retriever import AutoMergingRetriever
from llama_index.core.schema import Document
from llama_index.core.storage import StorageContext
from llama_index.core.storage.docstore import SimpleDocumentStore
class AutoMergingRetrieverPack(BaseLlamaPack):
"""
Auto-merging Retriever pack.
Build a hierarchical node graph from a set of documents, and
run our auto-merging retriever.
"""
def __init__(
self,
docs: List[Document] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# create the sentence window node parser w/ default settings
self.node_parser = HierarchicalNodeParser.from_defaults()
nodes = self.node_parser.get_nodes_from_documents(docs)
leaf_nodes = get_leaf_nodes(nodes)
docstore = SimpleDocumentStore()
# insert nodes into docstore
docstore.add_documents(nodes)
# define storage context (will include vector store by default too)
storage_context = StorageContext.from_defaults(docstore=docstore)
self.base_index = VectorStoreIndex(leaf_nodes, storage_context=storage_context)
base_retriever = self.base_index.as_retriever(similarity_top_k=6)
self.retriever = AutoMergingRetriever(
base_retriever, storage_context, verbose=True
)
self.query_engine = RetrieverQueryEngine.from_args(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"node_parser": self.node_parser,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
"""Auto Merging Retriever."""
from typing import Any, Dict, List
from llama_index.core import VectorStoreIndex
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.node_parser import (
HierarchicalNodeParser,
get_leaf_nodes,
)
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers.auto_merging_retriever import AutoMergingRetriever
from llama_index.core.schema import Document
from llama_index.core.storage import StorageContext
from llama_index.core.storage.docstore import SimpleDocumentStore
class AutoMergingRetrieverPack(BaseLlamaPack):
"""Auto-merging Retriever pack.
Build a hierarchical node graph from a set of documents, and
run our auto-merging retriever.
"""
def __init__(
self,
docs: List[Document] = None,
**kwargs: Any,
) -> None:
"""Init params."""
# create the sentence window node parser w/ default settings
self.node_parser = HierarchicalNodeParser.from_defaults()
nodes = self.node_parser.get_nodes_from_documents(docs)
leaf_nodes = get_leaf_nodes(nodes)
docstore = SimpleDocumentStore()
# insert nodes into docstore
docstore.add_documents(nodes)
# define storage context (will include vector store by default too)
storage_context = StorageContext.from_defaults(docstore=docstore)
self.base_index = VectorStoreIndex(leaf_nodes, storage_context=storage_context)
base_retriever = self.base_index.as_retriever(similarity_top_k=6)
self.retriever = AutoMergingRetriever(
base_retriever, storage_context, verbose=True
)
self.query_engine = RetrieverQueryEngine.from_args(self.retriever)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"node_parser": self.node_parser,
"retriever": self.retriever,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for cpu codegen testlib."""
from xla.backends.cpu.testlib import _extension
# Classes.
# go/keep-sorted start
ComputationKernelEmitter = _extension.ComputationKernelEmitter
ConcatenateKernelEmitter = _extension.ConcatenateKernelEmitter
DotKernelEmitter = _extension.DotKernelEmitter
ElementalKernelEmitter = _extension.ElementalKernelEmitter
HloCompiler = _extension.HloCompiler
JitCompiler = _extension.JitCompiler
KernelRunner = _extension.KernelRunner
LlvmTestKernelEmitter = _extension.LlvmTestKernelEmitter
MLIRContext = _extension.MLIRContext
MlirTestKernelEmitter = _extension.MlirTestKernelEmitter
ScatterKernelEmitter = _extension.ScatterKernelEmitter
TargetMachineFeatures = _extension.TargetMachineFeatures
# go/keep-sorted end
# Free functions.
# go/keep-sorted start
emit_fusion_kernel = _extension.emit_fusion_kernel
lower_to_llvm = _extension.lower_to_llvm
run_fusion_wrapper_pass = _extension.run_fusion_wrapper_pass
# go/keep-sorted end
|
# Copyright 2024 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Public API for cpu codegen testlib."""
from xla.backends.cpu.testlib import _extension
# go/keep-sorted start
ElementalKernelEmitter = _extension.ElementalKernelEmitter
HloCompiler = _extension.HloCompiler
JitCompiler = _extension.JitCompiler
KernelRunner = _extension.KernelRunner
LlvmIrKernelEmitter = _extension.LlvmIrKernelEmitter
LlvmIrKernelSpec = _extension.LlvmIrKernelSpec
TargetMachineFeatures = _extension.TargetMachineFeatures
# go/keep-sorted end
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_image, decode_jpeg, decode_webp, read_file
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = decode_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = decode_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
img_webp = decode_image(str(SCRIPT_DIR / "assets/fakedata/logos/rgb_pytorch.webp"))
if img_webp.shape != (3, 100, 100):
raise RuntimeError(f"Unexpected shape of img_webp: {img_webp.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = decode_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
if not torch.ops.image._is_compiled_against_turbo():
msg = "Torchvision wasn't compiled against libjpeg-turbo"
if os.getenv("IS_M1_CONDA_BUILD_JOB") == "1":
# When building the conda package on M1, it's difficult to enforce
# that we build against turbo due to interactions with the libwebp
# package. So we just accept it, instead of raising an error.
print(msg)
else:
raise ValueError(msg)
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, decode_webp, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
img_webp = read_image(str(SCRIPT_DIR / "assets/fakedata/logos/rgb_pytorch.webp"))
if img_webp.shape != (3, 100, 100):
raise RuntimeError(f"Unexpected shape of img_webp: {img_webp.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
if not torch.ops.image._is_compiled_against_turbo():
msg = "Torchvision wasn't compiled against libjpeg-turbo"
if os.getenv("IS_M1_CONDA_BUILD_JOB") == "1":
# When building the conda package on M1, it's difficult to enforce
# that we build against turbo due to interactions with the libwebp
# package. So we just accept it, instead of raising an error.
print(msg)
else:
raise ValueError(msg)
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
WAVLM_BASE,
WAVLM_BASE_PLUS,
WAVLM_LARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
import pytest
import torchaudio
from torchaudio.pipelines import (
HUBERT_ASR_LARGE,
HUBERT_ASR_XLARGE,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
VOXPOPULI_ASR_BASE_10K_DE,
VOXPOPULI_ASR_BASE_10K_EN,
VOXPOPULI_ASR_BASE_10K_ES,
VOXPOPULI_ASR_BASE_10K_FR,
VOXPOPULI_ASR_BASE_10K_IT,
WAV2VEC2_ASR_BASE_100H,
WAV2VEC2_ASR_BASE_10M,
WAV2VEC2_ASR_BASE_960H,
WAV2VEC2_ASR_LARGE_100H,
WAV2VEC2_ASR_LARGE_10M,
WAV2VEC2_ASR_LARGE_960H,
WAV2VEC2_ASR_LARGE_LV60K_100H,
WAV2VEC2_ASR_LARGE_LV60K_10M,
WAV2VEC2_ASR_LARGE_LV60K_960H,
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
)
@pytest.mark.parametrize(
"bundle",
[
WAV2VEC2_BASE,
WAV2VEC2_LARGE,
WAV2VEC2_LARGE_LV60K,
WAV2VEC2_XLSR53,
HUBERT_BASE,
HUBERT_LARGE,
HUBERT_XLARGE,
],
)
def test_pretraining_models(bundle):
"""Smoke test of downloading weights for pretraining models"""
bundle.get_model()
@pytest.mark.parametrize(
"bundle,lang,expected",
[
(WAV2VEC2_ASR_BASE_10M, "en", "I|HAD|THAT|CURIYOSSITY|BESID|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_BASE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_10M, "en", "I|HAD|THAT|CURIOUSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_100H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_960H, "en", "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_10M, "en", "I|HAD|THAT|CURIOUSSITY|BESID|ME|AT|THISS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_100H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(WAV2VEC2_ASR_LARGE_LV60K_960H, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_LARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(HUBERT_ASR_XLARGE, "en", "I|HAVE|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT|"),
(
VOXPOPULI_ASR_BASE_10K_EN,
"en2",
"i|hope|that|we|will|see|a|ddrasstic|decrease|of|funding|for|the|failed|eu|project|and|that|more|money|will|come|back|to|the|taxpayers", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_ES,
"es",
"la|primera|que|es|imprescindible|pensar|a|pequeña|a|escala|para|implicar|y|complementar|así|la|actuación|global", # noqa: E501
),
(VOXPOPULI_ASR_BASE_10K_DE, "de", "dabei|spielt|auch|eine|sorgfältige|berichterstattung|eine|wichtige|rolle"),
(
VOXPOPULI_ASR_BASE_10K_FR,
"fr",
"la|commission|va|faire|des|propositions|sur|ce|sujet|comment|mettre|en|place|cette|capacité|fiscale|et|le|conseil|européen|y|reviendra|sour|les|sujets|au|moins|de|mars", # noqa: E501
),
(
VOXPOPULI_ASR_BASE_10K_IT,
"it",
"credo|che|illatino|non|sia|contemplato|tra|le|traduzioni|e|quindi|mi|attengo|allitaliano",
),
],
)
def test_finetune_asr_model(
bundle,
lang,
expected,
sample_speech,
ctc_decoder,
):
"""Smoke test of downloading weights for fine-tuning models and simple transcription"""
model = bundle.get_model().eval()
waveform, sample_rate = torchaudio.load(sample_speech)
emission, _ = model(waveform)
decoder = ctc_decoder(bundle.get_labels())
result = decoder(emission[0])
assert result == expected
|
_base_ = './sparse_rcnn_r50_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 36
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './sparse_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, value) for value in min_values],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
|
from ._presets import StereoMatching # usort: skip
from ._augment import SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutMix, RandomMixUp, SimpleCopyPaste
from ._geometry import FixedSizeCrop
from ._misc import PermuteDimensions, TransposeDimensions
from ._type_conversion import LabelToOneHot
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky as cholesky
from keras.src.ops.linalg import det as det
from keras.src.ops.linalg import eig as eig
from keras.src.ops.linalg import eigh as eigh
from keras.src.ops.linalg import inv as inv
from keras.src.ops.linalg import lstsq as lstsq
from keras.src.ops.linalg import lu_factor as lu_factor
from keras.src.ops.linalg import norm as norm
from keras.src.ops.linalg import qr as qr
from keras.src.ops.linalg import solve as solve
from keras.src.ops.linalg import solve_triangular as solve_triangular
from keras.src.ops.linalg import svd as svd
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lstsq
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
from docarray.array.array import DocumentArray
__all__ = ['DocumentArray']
|
from docarray.array.documentarray import DocumentArray
__all__ = ['DocumentArray']
|
"""This is the langchain_ollama package.
It provides infrastructure for interacting with the Ollama service.
"""
from importlib import metadata
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_ollama.llms import OllamaLLM
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
del metadata # optional, avoids polluting the results of dir(__package__)
__all__ = [
"ChatOllama",
"OllamaLLM",
"OllamaEmbeddings",
"__version__",
]
|
"""This is the langchain_ollama package.
It provides infrastructure for interacting with the Ollama service.
"""
from importlib import metadata
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_ollama.llms import OllamaLLM
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
del metadata # optional, avoids polluting the results of dir(__package__)
__all__ = [
"ChatOllama",
"OllamaLLM",
"OllamaEmbeddings",
"__version__",
]
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
def wrap(wrappee, *, like, **kwargs):
"""[BETA] Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.datapoints.Datapoint` subclass as ``like``.
If ``like`` is a :class:`~torchvision.datapoints.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (:class:`~torchvision.datapoints.Datapoint`): The reference.
``wrappee`` will be converted into the same subclass as ``like``.
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.datapoint.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
import torch
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._torch_function_helpers import set_return_type
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
def wrap(wrappee, *, like, **kwargs):
"""Convert a :class:`torch.Tensor` (``wrappee``) into the same :class:`~torchvision.datapoint.Datapoint` subclass as ``like``.
If ``like`` is a :class:`~torchvision.datapoint.BoundingBoxes`, the ``format`` and ``canvas_size`` of
``like`` are assigned to ``wrappee``, unless they are passed as ``kwargs``.
Args:
wrappee (Tensor): The tensor to convert.
like (Datapoint): The
kwargs: Can contain "format" and "canvas_size" if ``like`` is a :class:`~torchvision.datapoint.BoundingBoxes`.
Ignored otherwise.
"""
if isinstance(like, BoundingBoxes):
return BoundingBoxes._wrap(
wrappee,
format=kwargs.get("format", like.format),
canvas_size=kwargs.get("canvas_size", like.canvas_size),
)
else:
return wrappee.as_subclass(type(like))
|
"""OpenAI-Like embeddings."""
from typing import Any, Dict, Optional
import httpx
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OpenAILikeEmbedding(OpenAIEmbedding):
"""
OpenAI-Like class for embeddings.
Args:
model_name (str):
Model for embedding.
api_key (str):
The API key (if any) to use for the embedding API.
api_base (str):
The base URL for the embedding API.
api_version (str):
The version for the embedding API.
max_retries (int):
The maximum number of retries for the embedding API.
timeout (float):
The timeout for the embedding API.
reuse_client (bool):
Whether to reuse the client for the embedding API.
callback_manager (CallbackManager):
The callback manager for the embedding API.
default_headers (Dict[str, str]):
The default headers for the embedding API.
additional_kwargs (Dict[str, Any]):
Additional kwargs for the embedding API.
dimensions (int):
The number of dimensions for the embedding API.
Example:
```bash
pip install llama-index-embeddings-openai-like
```
```python
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
embedding = OpenAILikeEmbedding(
model_name="my-model-name",
api_base="http://localhost:1234/v1",
api_key="fake",
embed_batch_size=10,
)
```
"""
def __init__(
self,
model_name: str,
embed_batch_size: int = 10,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: str = "fake",
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
# ensure model is not passed in kwargs, will cause error in parent class
if "model" in kwargs:
raise ValueError(
"Use `model_name` instead of `model` to initialize OpenAILikeEmbedding"
)
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
http_client=http_client,
async_http_client=async_http_client,
num_workers=num_workers,
**kwargs,
)
|
"""OpenAI-Like embeddings."""
from typing import Any, Dict, Optional
import httpx
from llama_index.core.callbacks.base import CallbackManager
from llama_index.embeddings.openai import OpenAIEmbedding
class OpenAILikeEmbedding(OpenAIEmbedding):
"""
OpenAI-Like class for embeddings.
Args:
model_name (str):
Model for embedding.
api_key (str):
The API key (if any) to use for the embedding API.
api_base (str):
The base URL for the embedding API.
api_version (str):
The version for the embedding API.
max_retries (int):
The maximum number of retries for the embedding API.
timeout (float):
The timeout for the embedding API.
reuse_client (bool):
Whether to reuse the client for the embedding API.
callback_manager (CallbackManager):
The callback manager for the embedding API.
default_headers (Dict[str, str]):
The default headers for the embedding API.
additional_kwargs (Dict[str, Any]):
Additional kwargs for the embedding API.
dimensions (int):
The number of dimensions for the embedding API.
Example:
```bash
pip install llama-index-embeddings-openai-like
```
```python
from llama_index.embeddings.openai_like import OpenAILikeEmbedding
embedding = OpenAILikeEmbedding(
model_name="my-model-name",
api_base="http://localhost:1234/v1",
api_key="fake",
embed_batch_size=10,
)
```
"""
def __init__(
self,
model_name: str,
embed_batch_size: int = 10,
dimensions: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: str = "fake",
api_base: Optional[str] = None,
api_version: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
async_http_client: Optional[httpx.AsyncClient] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
# ensure model is not passed in kwargs, will cause error in parent class
if "model" in kwargs:
raise ValueError(
"Use `model_name` instead of `model` to initialize OpenAILikeEmbedding"
)
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
dimensions=dimensions,
callback_manager=callback_manager,
additional_kwargs=additional_kwargs,
api_key=api_key,
api_base=api_base,
api_version=api_version,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
num_workers=num_workers,
**kwargs,
)
|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
import sqlite3
import warnings
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Iterable, Dict, Optional, TYPE_CHECKING, Union
from docarray.array.storage.sqlite.helper import initialize_table
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray.helper import random_identity, dataclass_from_dict
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType
def _sanitize_table_name(table_name: str, raise_warning=True) -> str:
ret = ''.join(c for c in table_name if c.isalnum() or c == '_')
if ret != table_name and raise_warning:
warnings.warn(f'The table name is changed to {ret} due to illegal characters')
return ret
@dataclass
class SqliteConfig:
connection: Optional[Union[str, 'sqlite3.Connection']] = None
table_name: Optional[str] = None
serialize_config: Dict = field(default_factory=dict)
conn_config: Dict = field(default_factory=dict)
journal_mode: str = 'WAL'
synchronous: str = 'OFF'
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
schema_version = '0'
def _sql(self, *args, **kwargs) -> 'sqlite3.Cursor':
return self._cursor.execute(*args, **kwargs)
def _commit(self):
self._connection.commit()
@property
def _cursor(self) -> 'sqlite3.Cursor':
return self._connection.cursor()
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[SqliteConfig, Dict]] = None,
**kwargs,
):
if not config:
config = SqliteConfig()
if isinstance(config, dict):
config = dataclass_from_dict(SqliteConfig, config)
from docarray import Document
sqlite3.register_adapter(
Document, lambda d: d.to_bytes(**config.serialize_config)
)
sqlite3.register_converter(
'Document', lambda x: Document.from_bytes(x, **config.serialize_config)
)
_conn_kwargs = dict()
_conn_kwargs.update(config.conn_config)
if config.connection is None:
config.connection = NamedTemporaryFile().name
if isinstance(config.connection, str):
self._connection = sqlite3.connect(
config.connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
elif isinstance(config.connection, sqlite3.Connection):
self._connection = config.connection
else:
raise TypeError(
f'connection argument must be None or a string or a sqlite3.Connection, not `{type(config.connection)}`'
)
self._connection.execute(f'PRAGMA synchronous={config.synchronous}')
self._connection.execute(f'PRAGMA journal_mode={config.journal_mode}')
self._table_name = (
_sanitize_table_name(self.__class__.__name__ + random_identity())
if config.table_name is None
else _sanitize_table_name(config.table_name)
)
self._persist = bool(config.table_name)
config.table_name = self._table_name
initialize_table(
self._table_name, self.__class__.__name__, self.schema_version, self._cursor
)
self._connection.commit()
self._config = config
super()._init_storage()
if _docs is None:
return
elif isinstance(_docs, Iterable):
self.clear()
self.extend(_docs)
else:
self.clear()
if isinstance(_docs, Document):
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
if 'table_name' not in config_subindex:
subindex_table_name = _sanitize_table_name(
config_joined['table_name'] + 'subindex' + subindex_name,
raise_warning=False,
)
config_joined['table_name'] = subindex_table_name
return config_joined
def __getstate__(self):
d = dict(self.__dict__)
del d['_connection']
return d
def __setstate__(self, state):
self.__dict__ = state
_conn_kwargs = dict()
_conn_kwargs.update(state['_config'].conn_config)
self._connection = sqlite3.connect(
state['_config'].connection,
detect_types=sqlite3.PARSE_DECLTYPES,
check_same_thread=False,
**_conn_kwargs,
)
|
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
@requests(on='/bar')
def bar(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReloadBar'
|
from jina import Executor, requests
class MyExecutorToReload1(Executor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@requests()
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'MyExecutorAfterReload'
|
import asyncio
from typing import Any, AsyncGenerator, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.errors import WorkflowDone
from llama_index.core.workflow.events import Event, StopEvent
from .utils import BUSY_WAIT_DELAY
class WorkflowHandler(asyncio.Future):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any,
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[Event]:
"""Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx and not self.ctx.stepwise:
raise ValueError("Stepwise context is required to run stepwise.")
if self.ctx:
try:
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) is not WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else:
# Continue with running next step. Make sure we wait for the
# step function to return before proceeding.
in_progress = len(await self.ctx.running_steps())
while in_progress:
await asyncio.sleep(BUSY_WAIT_DELAY)
in_progress = len(await self.ctx.running_steps())
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx._step_event_holding
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
else:
raise ValueError("Context is not set!")
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
import asyncio
from typing import Any, AsyncGenerator, Optional
from llama_index.core.workflow.context import Context
from llama_index.core.workflow.events import Event, StopEvent
from llama_index.core.workflow.errors import WorkflowDone
class WorkflowHandler(asyncio.Future):
def __init__(
self,
*args: Any,
ctx: Optional[Context] = None,
run_id: Optional[str] = None,
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self.run_id = run_id
self.ctx = ctx
def __str__(self) -> str:
return str(self.result())
def is_done(self) -> bool:
return self.done()
async def stream_events(self) -> AsyncGenerator[Event, None]:
if not self.ctx:
raise ValueError("Context is not set!")
while True:
ev = await self.ctx.streaming_queue.get()
yield ev
if type(ev) is StopEvent:
break
async def run_step(self) -> Optional[Event]:
"""Runs the next workflow step and returns the output Event.
If return is None, then the workflow is considered done.
Examples:
```python
handler = workflow.run(stepwise=True)
while not handler.is_done():
ev = await handler.run_step()
handler.ctx.send_event(ev)
result = handler.result()
print(result)
```
"""
# since event is sent before calling this method, we need to unblock the event loop
await asyncio.sleep(0)
if self.ctx and not self.ctx.stepwise:
raise ValueError("Stepwise context is required to run stepwise.")
if self.ctx:
try:
# Unblock all pending steps
for flag in self.ctx._step_flags.values():
flag.set()
# Yield back control to the event loop to give an unblocked step
# the chance to run (we won't actually sleep here).
await asyncio.sleep(0)
# check if we're done, or if a step raised error
we_done = False
exception_raised = None
retval = None
for t in self.ctx._tasks:
# Check if we're done
if not t.done():
continue
we_done = True
e = t.exception()
if type(e) != WorkflowDone:
exception_raised = e
if we_done:
# Remove any reference to the tasks
for t in self.ctx._tasks:
t.cancel()
await asyncio.sleep(0)
# the context is no longer running
self.ctx.is_running = False
if exception_raised:
raise exception_raised
if not self.done():
self.set_result(self.ctx.get_result())
else: # continue with running next step
# notify unblocked task that we're ready to accept next event
async with self.ctx._step_condition:
self.ctx._step_condition.notify()
# Wait to be notified that the new_ev has been written
async with self.ctx._step_event_written:
await self.ctx._step_event_written.wait()
retval = self.ctx._step_event_holding
except Exception as e:
if not self.is_done(): # Avoid InvalidStateError edge case
self.set_exception(e)
raise
else:
raise ValueError("Context is not set!")
return retval
async def cancel_run(self) -> None:
"""Method to cancel a Workflow execution."""
if self.ctx:
self.ctx._cancel_flag.set()
await asyncio.sleep(0)
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""
This example uses average word embeddings (for example from GloVe). It adds two fully-connected feed-forward layers (dense layers) to create a Deep Averaging Network (DAN).
If 'glove.6B.300d.txt.gz' does not exist, it tries to download it from our server.
See https://public.ukp.informatik.tu-darmstadt.de/reimers/embeddings/
for available word embeddings files
"""
import traceback
from datasets import load_dataset
from sentence_transformers import models, losses
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
num_train_epochs = 1
batch_size = 32
output_dir = "output/training_stsbenchmark_avg_word_embeddings-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 2. Define the model
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode="mean",
)
# Add two trainable feed-forward networks (DAN)
sent_embeddings_dimension = pooling_model.get_sentence_embedding_dimension()
dan1 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
dan2 = models.Dense(in_features=sent_embeddings_dimension, out_features=sent_embeddings_dimension)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model, dan1, dan2])
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and
# one similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="glove-mean-pooling-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = "glove-mean-pooling-sts"
try:
model.push_to_hub(model_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}')`."
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE as MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths as assert_same_paths
from keras.src.tree.tree_api import (
assert_same_structure as assert_same_structure,
)
from keras.src.tree.tree_api import flatten as flatten
from keras.src.tree.tree_api import flatten_with_path as flatten_with_path
from keras.src.tree.tree_api import is_nested as is_nested
from keras.src.tree.tree_api import lists_to_tuples as lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure as map_shape_structure
from keras.src.tree.tree_api import map_structure as map_structure
from keras.src.tree.tree_api import map_structure_up_to as map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as as pack_sequence_as
from keras.src.tree.tree_api import traverse as traverse
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.tree.tree_api import MAP_TO_NONE
from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import traverse
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
TimmImageEncoder encodes Document blobs of type `ndarray` (`float32`) and shape
`H x W x 3` into `d`-dimensional embedding. The input image in Document should be
in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
[Timm library](https://rwightman.github.io/pytorch-image-models/).
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
traversal_paths: Tuple[str] = ('r',),
batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
"""
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param traversal_paths: Defines traversal path through the docs. It can be
overridden via request params.
:param batch_size: Defines the batch size for inference on the loaded Timm model.
It can be overridden via request params.
:param use_default_preprocessing: If the input should be preprocessed with
default configuration. If `False`, inputs are expected to be pre-processed.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.device = device
self.batch_size = batch_size
self.use_default_preprocessing = use_default_preprocessing
self.traversal_paths = traversal_paths
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device).eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data from the `blob` attribute of Documents into a ndarray of
`D` as dimension, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary with additional request parameters. Possible
values are `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs is None:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = docs.traverse_flat(
traversal_paths=traversal_paths, filter_fn=lambda doc: doc.blob is not None
).batch(batch_size=batch_size)
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
with torch.inference_mode():
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
|
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torchvision.transforms as T
from jina import DocumentArray, Executor, requests
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
class TimmImageEncoder(Executor):
"""
TimmImageEncoder encodes Document blobs of type `ndarray` (`float32`) and shape
`H x W x 3` into `d`-dimensional embedding. The input image in Document should be
in RGB format.
If `use_default_preprocessing=False`, the expected input shape is
`3 x H x W` with `float32` dtype.
Internally, :class:`TimmImageEncoder` wraps the pre-trained models from
[Timm library](https://rwightman.github.io/pytorch-image-models/).
"""
def __init__(
self,
model_name: str = 'resnet18',
device: str = 'cpu',
traversal_paths: Tuple[str] = ('r',),
batch_size: Optional[int] = 32,
use_default_preprocessing: bool = True,
*args,
**kwargs
):
"""
:param model_name: the name of the model. Models listed on:
https://rwightman.github.io/pytorch-image-models/models/
:param device: Which device the model runs on. For example 'cpu' or 'cuda'.
:param traversal_paths: Defines traversal path through the docs. It can be
overridden via request params.
:param batch_size: Defines the batch size for inference on the loaded Timm model.
It can be overridden via request params.
:param use_default_preprocessing: If the input should be preprocessed with
default configuration. If `False`, inputs are expected to be pre-processed.
:param args: Additional positional arguments.
:param kwargs: Additional keyword arguments.
"""
super().__init__(*args, **kwargs)
self.device = device
self.batch_size = batch_size
self.use_default_preprocessing = use_default_preprocessing
self.traversal_paths = traversal_paths
self._model = create_model(model_name, pretrained=True, num_classes=0)
self._model = self._model.to(device).eval()
config = resolve_data_config({}, model=self._model)
self._preprocess = create_transform(**config)
self._preprocess.transforms.insert(0, T.ToPILImage())
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode image data from the `blob` attribute of Documents into a ndarray of
`D` as dimension, and fill the embedding of each Document.
:param docs: DocumentArray containing images
:param parameters: dictionary with additional request parameters. Possible
values are `traversal_paths` and the `batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
:param kwargs: Additional keyword arguments.
"""
if docs is None:
return
traversal_paths = parameters.get('traversal_paths', self.traversal_paths)
batch_size = parameters.get('batch_size', self.batch_size)
docs_batch_generator = docs.traverse_flat(
traversal_paths=traversal_paths,
filter_fn=lambda doc: doc.blob is not None
).batch(
batch_size=batch_size
)
for document_batch in docs_batch_generator:
blob_batch = [d.blob for d in document_batch]
if self.use_default_preprocessing:
images = np.stack([self._preprocess(img) for img in blob_batch])
else:
images = np.stack(blob_batch)
with torch.inference_mode():
tensor = torch.from_numpy(images).to(self.device)
features = self._model(tensor)
features = features.cpu().numpy()
for doc, embed in zip(document_batch, features):
doc.embedding = embed
|
from typing import Any, List, Optional, Union
from llama_index.core.base.llms.types import ChatMessage, ContentBlock, TextBlock
from llama_index.core.bridge.pydantic import Field, field_validator
from llama_index.core.memory.memory import BaseMemoryBlock
class StaticMemoryBlock(BaseMemoryBlock[List[ContentBlock]]):
"""
A memory block that returns static text.
This block is useful for including constant information or instructions
in the context without relying on external processing.
"""
name: str = Field(
default="StaticContent", description="The name of the memory block."
)
static_content: Union[List[ContentBlock]] = Field(
description="Static text or content to be returned by this memory block."
)
@field_validator("static_content", mode="before")
@classmethod
def validate_static_content(
cls, v: Union[str, List[ContentBlock]]
) -> List[ContentBlock]:
if isinstance(v, str):
v = [TextBlock(text=v)]
return v
async def _aget(
self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any
) -> List[ContentBlock]:
"""Return the static text, potentially filtered by conditions."""
return self.static_content
async def _aput(self, messages: List[ChatMessage]) -> None:
"""No-op for static blocks as they don't change."""
|
from typing import Any, List, Optional, Union
from llama_index.core.base.llms.types import ChatMessage, ContentBlock, TextBlock
from llama_index.core.bridge.pydantic import Field, field_validator
from llama_index.core.memory.memory import BaseMemoryBlock
class StaticMemoryBlock(BaseMemoryBlock[List[ContentBlock]]):
"""
A memory block that returns static text.
This block is useful for including constant information or instructions
in the context without relying on external processing.
"""
name: str = Field(
default="StaticContent",
description="The name of the memory block."
)
static_content: Union[List[ContentBlock]] = Field(
description="Static text or content to be returned by this memory block."
)
@field_validator("static_content", mode="before")
@classmethod
def validate_static_content(cls, v: Union[str, List[ContentBlock]]) -> List[ContentBlock]:
if isinstance(v, str):
v = [TextBlock(text=v)]
return v
async def _aget(self, messages: Optional[List[ChatMessage]] = None, **block_kwargs: Any) -> List[ContentBlock]:
"""Return the static text, potentially filtered by conditions."""
return self.static_content
async def _aput(self, messages: List[ChatMessage]) -> None:
"""No-op for static blocks as they don't change."""
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING: # pragma: no cover
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BaseDeployment
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
from argparse import Namespace
from copy import deepcopy
from typing import TYPE_CHECKING, Type
from hubble.executor.helper import is_valid_huburi
from hubble.executor.hubio import HubIO
from jina.enums import PodRoleType
from jina.orchestrate.pods import Pod
from jina.orchestrate.pods.container import ContainerPod
if TYPE_CHECKING:
from jina.orchestrate.pods import BasePod
class PodFactory:
"""
A PodFactory is a factory class, abstracting the Pod creation
"""
@staticmethod
def build_pod(args: 'Namespace') -> Type['BasePod']:
"""Build an implementation of a `BasePod` interface
:param args: deployment arguments parsed from the CLI.
:return: the created BaseDeployment
"""
# copy to update but forward original
cargs = deepcopy(args)
if is_valid_huburi(cargs.uses):
_hub_args = deepcopy(args)
_hub_args.uri = args.uses
_hub_args.no_usage = True
cargs.uses = HubIO(_hub_args).pull()
if (
cargs.pod_role != PodRoleType.HEAD
and cargs.uses
and cargs.uses.startswith('docker://')
):
return ContainerPod(cargs)
else:
return Pod(args)
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .tacotron2_loss_impl import Tacotron2LossGradcheckTests, Tacotron2LossShapeTests, Tacotron2LossTorchscriptTests
class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2TorchsciptFloat32CPU(Tacotron2LossTorchscriptTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2GradcheckFloat64CPU(Tacotron2LossGradcheckTests, PytorchTestCase):
dtype = torch.float64 # gradcheck needs a higher numerical accuracy
device = torch.device("cpu")
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .tacotron2_loss_impl import (
Tacotron2LossGradcheckTests,
Tacotron2LossShapeTests,
Tacotron2LossTorchscriptTests,
)
class TestTacotron2LossShapeFloat32CPU(Tacotron2LossShapeTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2TorchsciptFloat32CPU(Tacotron2LossTorchscriptTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2GradcheckFloat64CPU(Tacotron2LossGradcheckTests, PytorchTestCase):
dtype = torch.float64 # gradcheck needs a higher numerical accuracy
device = torch.device("cpu")
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
from transformers import is_torch_hpu_available, is_torch_xpu_available
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
accelerator = "NA"
if torch.cuda.is_available():
accelerator = "CUDA"
elif is_torch_xpu_available():
accelerator = "XPU"
elif is_torch_hpu_available():
accelerator = "HPU"
print("Torch accelerator:", accelerator)
if accelerator == "CUDA":
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
elif accelerator == "XPU":
print("SYCL version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
elif accelerator == "HPU":
print("HPU version:", torch.__version__.split("+")[-1])
print("Number of HPUs available:", torch.hpu.device_count())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
try:
import torchcodec
versions = torchcodec._core.get_ffmpeg_library_versions()
print("FFmpeg version:", versions["ffmpeg_version"])
except ImportError:
print("FFmpeg version:", None)
except (AttributeError, KeyError):
print("Failed to get FFmpeg version")
|
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
from transformers import is_torch_hpu_available, is_torch_xpu_available
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
accelerator = "NA"
if torch.cuda.is_available():
accelerator = "CUDA"
elif is_torch_xpu_available():
accelerator = "XPU"
elif is_torch_hpu_available():
accelerator = "HPU"
print("Torch accelerator:", accelerator)
if accelerator == "CUDA":
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
elif accelerator == "XPU":
print("SYCL version:", torch.version.xpu)
print("Number of XPUs available:", torch.xpu.device_count())
elif accelerator == "HPU":
print("HPU version:", torch.__version__.split("+")[-1])
print("Number of HPUs available:", torch.hpu.device_count())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, get_installed_path, install_package,
is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path', 'install_package',
'is_abs', 'is_method_overridden', 'has_method', 'digit_version',
'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer', 'check_time',
'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_method, import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, requires_executable, requires_package,
slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
to_ntuple, tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .timer import Timer, TimerError, check_time
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_installed', 'call_command', 'get_installed_path',
'check_install_package', 'is_abs', 'is_method_overridden', 'has_method',
'digit_version', 'get_git_hash', 'ManagerMeta', 'ManagerMixin', 'Timer',
'check_time', 'TimerError', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean,
unfold_wo_center, unmap, unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center'
]
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
labels: list[int],
name: str = "",
batch_size: int = 32,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
labels=labels,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
return super().compute_metrices(model=model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import BinaryClassificationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseBinaryClassificationEvaluator(BinaryClassificationEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
labels: list[int],
name: str = "",
batch_size: int = 32,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
labels=labels,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def compute_metrices(self, model: SparseEncoder) -> dict[str, dict[str, float]]:
return super().compute_metrices(model=model)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from typing import Any, Callable, Optional
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
image_size(tuple, optional): Size of the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
from typing import Any, Callable, Optional
import torch
from .. import transforms
from .vision import VisionDataset
class FakeData(VisionDataset):
"""A fake dataset that returns randomly generated images and returns them as PIL images
Args:
size (int, optional): Size of the dataset. Default: 1000 images
image_size(tuple, optional): Size if the returned images. Default: (3, 224, 224)
num_classes(int, optional): Number of classes in the dataset. Default: 10
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
random_offset (int): Offsets the index-based random seed used to
generate each image. Default: 0
"""
def __init__(
self,
size: int = 1000,
image_size: tuple[int, int, int] = (3, 224, 224),
num_classes: int = 10,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
random_offset: int = 0,
) -> None:
super().__init__(transform=transform, target_transform=target_transform)
self.size = size
self.num_classes = num_classes
self.image_size = image_size
self.random_offset = random_offset
def __getitem__(self, index: int) -> tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
# create random image that is consistent with the index id
if index >= len(self):
raise IndexError(f"{self.__class__.__name__} index out of range")
rng_state = torch.get_rng_state()
torch.manual_seed(index + self.random_offset)
img = torch.randn(*self.image_size)
target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0]
torch.set_rng_state(rng_state)
# convert to PIL Image
img = transforms.ToPILImage()(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target.item()
def __len__(self) -> int:
return self.size
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
self._update_subindices_append_extend(docs)
def append(self, value: 'Document'):
self.extend([value])
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.memory import DocumentArrayInMemory
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods"""
def extend(self, values: Iterable['Document']) -> None:
docs = DocumentArrayInMemory(values)
if len(docs) == 0:
return
for doc in docs:
doc.embedding = self._map_embedding(doc.embedding)
self._annlite.index(docs)
self._offset2ids.extend([doc.id for doc in docs])
def __del__(self) -> None:
if not self._persist:
self._offset2ids.clear()
self._annlite.clear()
def __eq__(self, other):
"""In annlite backend, data are considered as identical if configs point to the same database source"""
return (
type(self) is type(other)
and type(self._config) is type(other._config)
and self._config == other._config
)
def __repr__(self):
return f'<DocumentArray[AnnLite] (length={len(self)}) at {id(self)}>'
def __contains__(self, x: Union[str, 'Document']):
if isinstance(x, str):
return self._annlite.get_doc_by_id(x) is not None
elif isinstance(x, Document):
return self._annlite.get_doc_by_id(x.id) is not None
else:
return False
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Maximum")
class Maximum(Merge):
"""Computes element-wise maximum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Maximum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.maximum([x1, x2])`
>>> y = keras.layers.Maximum()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
return self._apply_merge_op_and_or_mask(ops.maximum, inputs)
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
"""Functional interface to the `keras.layers.Maximum` layer.
Args:
inputs: A list of input tensors , all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the element-wise product of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.maximum([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.maximum([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Maximum(**kwargs)(inputs)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Maximum")
class Maximum(Merge):
"""Computes element-wise maximum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Maximum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.maximum([x1, x2])`
>>> y = keras.layers.Maximum()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.maximum(output, inputs[i])
return output
@keras_export("keras.layers.maximum")
def maximum(inputs, **kwargs):
"""Functional interface to the `keras.layers.Maximum` layer.
Args:
inputs: A list of input tensors , all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the element-wise product of the inputs with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.maximum([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.maximum([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Maximum(**kwargs)(inputs)
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The condition that the documents need to fulfill before reaching the Executor.'
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'
'This Deployment will not be context managed by the Flow.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
|
"""Argparser module for Deployment runtimes"""
import argparse
from jina import helper
from jina.enums import DeploymentRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_base_deployment_parser(parser):
"""Add mixin arguments required by :class:`BaseDeployment` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Deployment')
gp.add_argument(
'--uses-before',
type=str,
help='The executor attached before the Pods described by --uses, typically before sending to all '
'shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--uses-after',
type=str,
help='The executor attached after the Pods described by --uses, typically used for receiving from '
'all shards, accepted type follows `--uses`. This argument only applies for sharded Deployments (shards > 1).',
)
gp.add_argument(
'--when',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The condition that the documents need to fulfill before reaching the Executor.'
'The condition can be defined in the form of a `DocArray query condition <https://docarray.jina.ai/fundamentals/documentarray/find/#query-by-conditions>`',
)
gp.add_argument(
'--external',
action='store_true',
default=False,
help='The Deployment will be considered an external Deployment that has been started independently from the Flow.'
'This Deployment will not be context managed by the Flow.',
)
# hidden CLI used for internal only
gp.add_argument(
'--deployment-role',
type=DeploymentRoleType.from_string,
choices=list(DeploymentRoleType),
help='The role of this deployment in the flow'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--tls',
action='store_true',
default=False,
help='If set, connect to deployment using tls encryption',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
help=f'The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
ControlNetModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class ControlNetModelSingleFileTests(unittest.TestCase):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)
assert model_default.config.upcast_attention is False
assert model_default.dtype == torch.float32
torch_dtype = torch.float16
upcast_attention = True
model = self.model_class.from_single_file(
self.ckpt_path,
upcast_attention=upcast_attention,
torch_dtype=torch_dtype,
)
assert model.config.upcast_attention == upcast_attention
assert model.dtype == torch_dtype
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
ControlNetModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class ControlNetModelSingleFileTests(unittest.TestCase):
model_class = ControlNetModel
ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"
repo_id = "lllyasviel/control_v11p_sd15_canny"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path)
assert model_default.config.upcast_attention is False
assert model_default.dtype == torch.float32
torch_dtype = torch.float16
upcast_attention = True
model = self.model_class.from_single_file(
self.ckpt_path,
upcast_attention=upcast_attention,
torch_dtype=torch_dtype,
)
assert model.config.upcast_attention == upcast_attention
assert model.dtype == torch_dtype
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import Cutmix, Mixup, RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertImageDtype
from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBox, ToDtype
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional, utils # usort: skip
from ._transform import Transform # usort: skip
from ._augment import RandomErasing
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
Grayscale,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomGrayscale,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBox, ConvertBoundingBoxFormat, ConvertImageDtype
from ._misc import GaussianBlur, Identity, Lambda, LinearTransformation, Normalize, SanitizeBoundingBox, ToDtype
from ._temporal import UniformTemporalSubsample
from ._type_conversion import PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import ToTensor # usort: skip
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.4.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of TOOD. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of TOOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of TOOD. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of TOOD. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
:class:`DetDataPreprocessor` to process the input data.
Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
"""Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_parse_string_eval_output,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_eval_chain() -> None:
"""Test a simple eval chain."""
example = {"query": "What's my name", "answer": "John Doe"}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == "foo"
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
@pytest.mark.parametrize("chain_cls", [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: type[ContextQAEvalChain]) -> None:
"""Test a simple eval chain."""
example = {
"query": "What's my name",
"context": "The name of this person is John Doe",
}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = chain_cls.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert "text" in outputs[0]
assert outputs[0]["text"] == "foo"
def test_load_criteria_evaluator() -> None:
"""Test loading a criteria evaluator."""
try:
from langchain_openai import ChatOpenAI # noqa: F401
except ImportError:
pytest.skip("langchain-openai not installed")
# Patch the env with an openai-api-key
with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
# Check it can load using a string arg (even if that's not how it's typed)
load_evaluator("criteria") # type: ignore[arg-type]
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_implements_string_evaluator_protocol(
chain_cls: type[LLMChain],
) -> None:
assert issubclass(chain_cls, StringEvaluator)
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_returns_expected_results(
chain_cls: type[LLMChain],
) -> None:
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
chain = chain_cls.from_llm(fake_llm) # type: ignore[attr-defined]
results = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert results["score"] == 1
@pytest.mark.parametrize(
"output,expected",
[
(
""" GRADE: CORRECT
QUESTION: according to the passage, what is the main reason that the author wrote this passage?
STUDENT ANSWER: to explain the importance of washing your hands
TRUE ANSWER: to explain the importance of washing your hands
GRADE:""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" Here is my step-by-step reasoning to grade the student's answer:
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
6. Therefore, the student's answer contains the same factual information as the true answer, so it should be graded as correct.
GRADE: CORRECT""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{
"value": "CORRECT",
"score": 1,
},
),
(
"""The student's answer is "Regent's Park," which matches the correct answer given in the context. Therefore, the student's answer is CORRECT.""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
],
)
def test_qa_output_parser(output: str, expected: dict) -> None:
expected["reasoning"] = output.strip()
assert _parse_string_eval_output(output) == expected
|
"""Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_parse_string_eval_output,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_eval_chain() -> None:
"""Test a simple eval chain."""
example = {"query": "What's my name", "answer": "John Doe"}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == "foo"
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
@pytest.mark.parametrize("chain_cls", [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: type[ContextQAEvalChain]) -> None:
"""Test a simple eval chain."""
example = {
"query": "What's my name",
"context": "The name of this person is John Doe",
}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = chain_cls.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert "text" in outputs[0]
assert outputs[0]["text"] == "foo"
def test_load_criteria_evaluator() -> None:
"""Test loading a criteria evaluator."""
try:
from langchain_openai import ChatOpenAI # noqa: F401
except ImportError:
pytest.skip("langchain-openai not installed")
# Patch the env with an openai-api-key
with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
# Check it can load using a string arg (even if that's not how it's typed)
load_evaluator("criteria") # type: ignore[arg-type]
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_implements_string_evaluator_protocol(
chain_cls: type[LLMChain],
) -> None:
assert issubclass(chain_cls, StringEvaluator)
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_returns_expected_results(
chain_cls: type[LLMChain],
) -> None:
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
chain = chain_cls.from_llm(fake_llm) # type: ignore[attr-defined]
results = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert results["score"] == 1
@pytest.mark.parametrize(
"output,expected",
[
(
""" GRADE: CORRECT
QUESTION: according to the passage, what is the main reason that the author wrote this passage?
STUDENT ANSWER: to explain the importance of washing your hands
TRUE ANSWER: to explain the importance of washing your hands
GRADE:""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" Here is my step-by-step reasoning to grade the student's answer:
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
6. Therefore, the student's answer contains the same factual information as the true answer, so it should be graded as correct.
GRADE: CORRECT""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{
"value": "CORRECT",
"score": 1,
},
),
(
"""The student's answer is "Regent's Park," which matches the correct answer given in the context. Therefore, the student's answer is CORRECT.""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
],
)
def test_qa_output_parser(output: str, expected: dict) -> None:
expected["reasoning"] = output.strip()
assert _parse_string_eval_output(output) == expected
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.6.0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.5.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image shape on CrowdHuman dataset.
Here is an example to run this script.
Example:
python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \
--dataset ${DATASET_TYPE}
"""
import argparse
import json
import logging
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get, get_text
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
choices=['train', 'val'],
help='Collect image metas from which dataset')
parser.add_argument(
'--nproc',
default=10,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_image_metas(anno_str, img_prefix):
id_hw = {}
anno_dict = json.loads(anno_str)
img_path = osp.join(img_prefix, f"{anno_dict['ID']}.jpg")
img_id = anno_dict['ID']
img_bytes = get(img_path)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
id_hw[img_id] = img.shape[:2]
return id_hw
def main():
args = parse_args()
# get ann_file and img_prefix from config files
cfg = Config.fromfile(args.config)
dataset = args.dataset
dataloader_cfg = cfg.get(f'{dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
# load image metas
print_log(
f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)
anno_strs = get_text(ann_file).strip().split('\n')
pool = Pool(args.nproc)
# get image metas with multiple processes
id_hw_temp = pool.starmap(
get_image_metas,
zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),
)
pool.close()
# save image metas
id_hw = {}
for sub_dict in id_hw_temp:
id_hw.update(sub_dict)
data_root = osp.dirname(ann_file)
save_path = osp.join(data_root, f'id_hw_{dataset}.json')
print_log(
f'\nsaving "id_hw_{dataset}.json" in "{data_root}"',
level=logging.INFO)
dump(id_hw, save_path, file_format='json')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image shape on CrowdHuman dataset.
Here is an example to run this script.
Example:
python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \
--dataset ${DATASET_TYPE}
"""
import argparse
import json
import logging
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
choices=['train', 'val'],
help='Collect image metas from which dataset')
parser.add_argument(
'--nproc',
default=10,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_image_metas(anno_str, img_prefix):
id_hw = {}
file_client = FileClient(backend='disk')
anno_dict = json.loads(anno_str)
img_path = osp.join(img_prefix, f"{anno_dict['ID']}.jpg")
img_id = anno_dict['ID']
img_bytes = file_client.get(img_path)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
id_hw[img_id] = img.shape[:2]
return id_hw
def main():
args = parse_args()
# get ann_file and img_prefix from config files
cfg = Config.fromfile(args.config)
file_client_args = cfg.get('file_client_args', dict(backend='disk'))
file_client = FileClient(**file_client_args)
dataset = args.dataset
dataloader_cfg = cfg.get(f'{dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
# load image metas
print_log(
f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)
anno_strs = file_client.get_text(ann_file).strip().split('\n')
pool = Pool(args.nproc)
# get image metas with multiple processes
id_hw_temp = pool.starmap(
get_image_metas,
zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),
)
pool.close()
# save image metas
id_hw = {}
for sub_dict in id_hw_temp:
id_hw.update(sub_dict)
data_root = osp.dirname(ann_file)
save_path = osp.join(data_root, f'id_hw_{dataset}.json')
print_log(
f'\nsaving "id_hw_{dataset}.json" in "{data_root}"',
level=logging.INFO)
dump(id_hw, save_path, file_format='json')
if __name__ == '__main__':
main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.