input | output
---|---
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES:
yaml = _get_yaml(template, params)
if params.get('device_plugins'):
yaml = _get_deployment_with_device_plugins(yaml, params)
if params.get('env_from_secret'):
yaml = _get_deployment_with_env_secret(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict:
    for k, v in params['env_from_secret'].items():
env_var = {}
env_var['name'] = k
env_var['valueFrom'] = {'secretKeyRef': {'name': v['name'], 'key': v['key']}}
deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var)
return deployment
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES and params.get('device_plugins'):
yaml = _get_yaml(template, params)
yaml = _get_deployment_with_device_plugins(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path) as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
|
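To make the effect of the two post-processing helpers easier to see, here is a small hedged sketch that applies them to a hand-built deployment dict; the container layout and parameter values are made up for illustration and are not taken from the actual template files.

# Illustrative sketch (not part of the dataset row above): exercise the two
# deployment post-processing helpers on a hand-built deployment dict, so no
# template files from resources/k8s/template are needed.
deployment = {
    'spec': {'template': {'spec': {'containers': [{'name': 'executor', 'env': []}]}}}
}
deployment = _get_deployment_with_device_plugins(
    deployment, {'device_plugins': {'nvidia.com/gpu': 1}}
)
deployment = _get_deployment_with_env_secret(
    deployment, {'env_from_secret': {'API_KEY': {'name': 'my-secret', 'key': 'api-key'}}}
)
# containers[0] now has resources.limits set and an env entry with a secretKeyRef
print(deployment['spec']['template']['spec']['containers'][0])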
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset will add a different `DATASET_TYPE` to dataset.metainfo,
            # which raises an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['DATASET_TYPE'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as the default evaluation mode, while
# Pascal VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as the default evaluation mode, while
# Pascal VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
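For context, a short hedged sketch of how a dataset config like the one above is typically consumed; the config file path is an assumption and mmengine must be installed.

# Hedged sketch: load a config like the one above with mmengine and inspect
# a few of the resolved fields. The file name is an assumption.
from mmengine.config import Config

cfg = Config.fromfile('configs/_base_/datasets/voc0712.py')  # assumed path
print(cfg.train_dataloader.batch_size)   # 2
print(cfg.val_evaluator.eval_mode)       # '11points'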
import os
from typing import Callable, List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_compute_tokens():
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
@pytest.mark.parametrize(
'hidden_seqlen', [4, 8]
)
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello world"])
hidden_states = tuple(torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7))
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
enc = TransformerTorchEncoder(device="cuda")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {"traversal_paths": [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append("Hello, my name is Michael.")
sentences.append("Today we are going to Disney World.")
sentences.append("There are animals on the road")
sentences.append("A dog is running down the road")
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
import os
from typing import Callable, List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_compute_tokens():
enc = TransformerTorchEncoder(base_tokenizer_model="bert-base-cased")
tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])
assert tokens["input_ids"].shape == (2, 7)
assert tokens["token_type_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
@pytest.mark.parametrize(
'hidden_seqlen', [4, 8]
)
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello world"])
hidden_states = tuple(torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7))
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
enc = TransformerTorchEncoder(device="cuda")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {"traversal_paths": [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append("Hello, my name is Michael.")
sentences.append("Today we are going to Disney World.")
sentences.append("There are animals on the road")
sentences.append("A dog is running down the road")
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
self._iterations.assign_add(1)
|
"""A class for JAX specific optimizer logic.
Its purpose is to route around statelessness
requirements in cond ops used for EMA handling
and gradient accumulation handling. We do this
by skipping conditionals entirely.
"""
import jax
from jax import numpy as jnp
from keras.src.optimizers import base_optimizer
class JaxOptimizer(base_optimizer.BaseOptimizer):
def _backend_apply_gradients(self, grads, trainable_variables):
if self.gradient_accumulation_steps:
is_update_step = (
self._iterations + 1
) % self.gradient_accumulation_steps == 0
steps = self.gradient_accumulation_steps
current_trainable_vars_value = [
v.value for v in trainable_variables
]
current_optimizer_vars_value = [v.value for v in self.variables]
# `trainable_variables` might have been filtered in previous
# processing steps, so we need to ensure the correct mapping between
# `self._accumulated_gradients` and `trainable_variables`
acc_grads = [
self._accumulated_gradients[self._get_variable_index(v)]
for v in trainable_variables
]
new_g_accs = jax.lax.cond(
is_update_step,
lambda: [jnp.zeros(g.shape, dtype=g.dtype) for g in acc_grads],
lambda: [g + acc_g for g, acc_g in zip(grads, acc_grads)],
)
grads = jax.lax.cond(
is_update_step,
lambda: [
(g + acc_g) / steps for g, acc_g in zip(grads, acc_grads)
],
lambda: list(grads),
)
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
new_trainable_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in trainable_variables],
lambda: current_trainable_vars_value,
)
new_opt_vars = jax.lax.cond(
is_update_step,
lambda: [v.value for v in self.variables],
lambda: current_optimizer_vars_value,
)
for value, v in zip(new_trainable_vars, trainable_variables):
v.assign(value)
for value, v in zip(new_opt_vars, self.variables):
v.assign(value)
for n_g_acc, g_acc in zip(new_g_accs, acc_grads):
g_acc.assign(n_g_acc)
else:
# Apply clipping and weight decay.
grads = self._clip_gradients(grads)
self._apply_weight_decay(trainable_variables)
self._backend_update_step(
grads, trainable_variables, self.learning_rate
)
if self.use_ema:
self._update_model_variables_moving_average(
self._trainable_variables
)
if self.ema_overwrite_frequency is not None:
should_overwrite_model_vars = (
self.iterations + 1
) % self.ema_overwrite_frequency == 0
should_overwrite_model_vars_int = (
should_overwrite_model_vars.astype("int32")
)
should_not_overwrite_model_vars_int = jnp.logical_not(
should_overwrite_model_vars
).astype("int32")
current_trainable_vars_value = [
v.value for v in self._trainable_variables
]
for var, average_var in zip(
self._trainable_variables,
self._model_variables_moving_average,
):
var.assign(
average_var * should_overwrite_model_vars_int
+ var.value * should_not_overwrite_model_vars_int
)
self._iterations.assign_add(1)
|
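The input and output of the pair above are identical; as a side note, a standalone sketch (not taken from the Keras source) of the accumulate-or-apply pattern it relies on may help: `jax.lax.cond` selects between growing the gradient accumulator and applying the averaged gradient, with no Python-level branching.

# Standalone sketch of the accumulate-or-apply pattern used above.
import jax
import jax.numpy as jnp

def accumulate_or_apply(iteration, grad, acc, steps=4):
    is_update_step = (iteration + 1) % steps == 0
    new_acc = jax.lax.cond(
        is_update_step,
        lambda: jnp.zeros_like(acc),   # reset the accumulator after an update step
        lambda: acc + grad,            # otherwise keep accumulating
    )
    applied_grad = jax.lax.cond(
        is_update_step,
        lambda: (grad + acc) / steps,  # averaged gradient actually applied
        lambda: jnp.zeros_like(grad),  # nothing applied on accumulation steps
    )
    return applied_grad, new_acc

applied, acc = accumulate_or_apply(jnp.asarray(3), jnp.ones(2), 3.0 * jnp.ones(2))
print(applied, acc)  # step 3 of 4 is an update step: applied == [1. 1.], acc reset to zeros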
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import SparseEncoderTrainingArguments
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
    # 4. Define an evaluator for use during training, so performance can be tracked alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation.SparseNanoBEIREvaluator import SparseNanoBEIREvaluator
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
# Initialize model components
model_name = "microsoft/mpnet-base"
transformer = Transformer(model_name)
# transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
# 2a. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
    # 4. Define an evaluator for use during training, so performance can be tracked alongside the evaluation loss.
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(SparseNanoBEIREvaluator(["msmarco", "nfcorpus", "nq"], truncate_dim=k_dim))
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
dev_evaluator(model)
# Set up training arguments
run_name = "sparse-mpnet-base-nq-fresh"
training_args = SparseEncoderTrainingArguments(
output_dir=f"models/{run_name}",
num_train_epochs=1,
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=200,
eval_strategy="steps",
eval_steps=400,
save_strategy="steps",
save_steps=400,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name=run_name,
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance again after training
dev_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save_pretrained(f"models/{run_name}/final")
model.push_to_hub(run_name)
if __name__ == "__main__":
main()
|
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .autograd_test_impl import AutogradTestFloat32, AutogradTestMixin
@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
device = "cuda"
@skipIfNoCuda
class AutogradRNNTCUDATest(AutogradTestFloat32, PytorchTestCase):
device = "cuda"
|
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoCuda,
)
from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32
@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
device = "cuda"
@skipIfNoCuda
class AutogradRNNTCUDATest(AutogradTestFloat32, PytorchTestCase):
device = "cuda"
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
OptSamplingResultList = Optional[SamplingResultList]
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Collecting some commonly used type hint in mmdetection."""
from typing import List, Optional, Union
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from ..bbox.samplers import SamplingResult
from ..data_structures import DetDataSample
# Type hint of config data
ConfigType = Union[ConfigDict, dict]
OptConfigType = Optional[ConfigType]
# Type hint of one or more config data
MultiConfig = Union[ConfigType, List[ConfigType]]
OptMultiConfig = Optional[MultiConfig]
InstanceList = List[InstanceData]
OptInstanceList = Optional[InstanceList]
SampleList = List[DetDataSample]
OptSampleList = Optional[SampleList]
SamplingResultList = List[SamplingResult]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
'bbox_xyxy_to_cxcywh', 'RegionAssigner'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
'RegionAssigner'
]
|
import json
from typing import Any, Type, TypeGuard, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from .type import type_match
def to_dict(data) -> dict:
if isinstance(data, BaseModel):
data = data.model_dump()
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(to_dict(data))
T = TypeVar("T")
@overload
def loads(data: str | bytes, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str | bytes, *args, **kwargs) -> Any: ...
def loads(
data: str | bytes, *args, target_type: Type[T] | None = None, **kwargs
) -> Any:
if isinstance(data, bytes):
data = data.decode("utf-8")
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]:
return isinstance(value, list) and all(
isinstance(item, BaseModel) for item in value
)
def convert_pydantic_to_json(output_data: Any) -> Any:
if isinstance(output_data, BaseModel):
return output_data.model_dump()
if is_list_of_basemodels(output_data):
return [item.model_dump() for item in output_data]
return output_data
|
import json
from typing import Any, Type, TypeGuard, TypeVar, overload
import jsonschema
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from .type import type_match
def to_dict(data) -> dict:
if isinstance(data, BaseModel):
data = data.model_dump()
return jsonable_encoder(data)
def dumps(data) -> str:
return json.dumps(jsonable_encoder(data))
T = TypeVar("T")
@overload
def loads(data: str, *args, target_type: Type[T], **kwargs) -> T: ...
@overload
def loads(data: str, *args, **kwargs) -> Any: ...
def loads(data: str, *args, target_type: Type[T] | None = None, **kwargs) -> Any:
parsed = json.loads(data, *args, **kwargs)
if target_type:
return type_match(parsed, target_type)
return parsed
def validate_with_jsonschema(
schema: dict[str, Any], data: dict[str, Any]
) -> str | None:
"""
Validate the data against the schema.
Returns the validation error message if the data does not match the schema.
"""
try:
jsonschema.validate(data, schema)
return None
except jsonschema.ValidationError as e:
return str(e)
def is_list_of_basemodels(value: object) -> TypeGuard[list[BaseModel]]:
return isinstance(value, list) and all(
isinstance(item, BaseModel) for item in value
)
def convert_pydantic_to_json(output_data: Any) -> Any:
if isinstance(output_data, BaseModel):
return output_data.model_dump()
if is_list_of_basemodels(output_data):
return [item.model_dump() for item in output_data]
return output_data
|
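A hypothetical usage sketch of `to_dict()` and `dumps()` from the snippet above; the `User` model and its fields are invented for illustration.

# Hypothetical usage sketch of to_dict()/dumps() with a made-up Pydantic model.
from pydantic import BaseModel

class User(BaseModel):
    name: str
    age: int

user = User(name="Ada", age=36)
print(to_dict(user))            # {'name': 'Ada', 'age': 36}
print(dumps({"user": user}))    # '{"user": {"name": "Ada", "age": 36}}'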
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ga-retinanet_r101-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 480), (1333, 960)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3.0,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
"""Abstract interface for document loader implementations."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from langchain_text_splitters import TextSplitter
from langchain_core.documents import Document
from langchain_core.documents.base import Blob
class BaseLoader(ABC): # noqa: B024
"""Interface for Document Loader.
Implementations should implement the lazy-loading method using generators
to avoid loading all Documents into memory at once.
`load` is provided just for user convenience and should not be overridden.
"""
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into Document objects."""
return list(self.lazy_load())
async def aload(self) -> list[Document]:
"""Load data into Document objects."""
return [document async for document in self.alazy_load()]
def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> list[Document]:
"""Load Documents and split into chunks. Chunks are returned as Documents.
Do not override this method. It should be considered to be deprecated!
Args:
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
Returns:
List of Documents.
"""
if text_splitter is None:
try:
from langchain_text_splitters import RecursiveCharacterTextSplitter
except ImportError as e:
msg = (
"Unable to import from langchain_text_splitters. Please specify "
"text_splitter or install langchain_text_splitters with "
"`pip install -U langchain-text-splitters`."
)
raise ImportError(msg) from e
text_splitter_: TextSplitter = RecursiveCharacterTextSplitter()
else:
text_splitter_ = text_splitter
docs = self.load()
return text_splitter_.split_documents(docs)
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for Documents."""
if type(self).load != BaseLoader.load:
return iter(self.load())
msg = f"{self.__class__.__name__} does not implement lazy_load()"
raise NotImplementedError(msg)
async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for Documents."""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.
A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.
"""
@abstractmethod
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface.
Subclasses are required to implement this method.
Args:
blob: Blob instance
Returns:
Generator of documents
"""
def parse(self, blob: Blob) -> list[Document]:
"""Eagerly parse the blob into a document or documents.
        This is a convenience method for interactive development environments.
        Production applications should favor the lazy_parse method instead.
        Subclasses should generally not override this parse method.
Args:
blob: Blob instance
Returns:
List of documents
"""
return list(self.lazy_parse(blob))
|
"""Abstract interface for document loader implementations."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Optional
from langchain_core.runnables import run_in_executor
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Iterator
from langchain_text_splitters import TextSplitter
from langchain_core.documents import Document
from langchain_core.documents.base import Blob
class BaseLoader(ABC): # noqa: B024
"""Interface for Document Loader.
Implementations should implement the lazy-loading method using generators
to avoid loading all Documents into memory at once.
`load` is provided just for user convenience and should not be overridden.
"""
# Sub-classes should not implement this method directly. Instead, they
# should implement the lazy load method.
def load(self) -> list[Document]:
"""Load data into Document objects."""
return list(self.lazy_load())
async def aload(self) -> list[Document]:
"""Load data into Document objects."""
return [document async for document in self.alazy_load()]
def load_and_split(
self, text_splitter: Optional[TextSplitter] = None
) -> list[Document]:
"""Load Documents and split into chunks. Chunks are returned as Documents.
Do not override this method. It should be considered to be deprecated!
Args:
text_splitter: TextSplitter instance to use for splitting documents.
Defaults to RecursiveCharacterTextSplitter.
Returns:
List of Documents.
"""
if text_splitter is None:
try:
from langchain_text_splitters import RecursiveCharacterTextSplitter
except ImportError as e:
msg = (
"Unable to import from langchain_text_splitters. Please specify "
"text_splitter or install langchain_text_splitters with "
"`pip install -U langchain-text-splitters`."
)
raise ImportError(msg) from e
_text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
else:
_text_splitter = text_splitter
docs = self.load()
return _text_splitter.split_documents(docs)
# Attention: This method will be upgraded into an abstractmethod once it's
# implemented in all the existing subclasses.
def lazy_load(self) -> Iterator[Document]:
"""A lazy loader for Documents."""
if type(self).load != BaseLoader.load:
return iter(self.load())
msg = f"{self.__class__.__name__} does not implement lazy_load()"
raise NotImplementedError(msg)
async def alazy_load(self) -> AsyncIterator[Document]:
"""A lazy loader for Documents."""
iterator = await run_in_executor(None, self.lazy_load)
done = object()
while True:
doc = await run_in_executor(None, next, iterator, done)
if doc is done:
break
yield doc # type: ignore[misc]
class BaseBlobParser(ABC):
"""Abstract interface for blob parsers.
A blob parser provides a way to parse raw data stored in a blob into one
or more documents.
The parser can be composed with blob loaders, making it easy to reuse
a parser independent of how the blob was originally loaded.
"""
@abstractmethod
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazy parsing interface.
Subclasses are required to implement this method.
Args:
blob: Blob instance
Returns:
Generator of documents
"""
def parse(self, blob: Blob) -> list[Document]:
"""Eagerly parse the blob into a document or documents.
        This is a convenience method for interactive development environments.
        Production applications should favor the lazy_parse method instead.
        Subclasses should generally not override this parse method.
Args:
blob: Blob instance
Returns:
List of documents
"""
return list(self.lazy_parse(blob))
|
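As a hedged illustration of the interface above, a concrete loader can implement only `lazy_load()` and inherit `load()`, `aload()` and `load_and_split()` from the base class; the line-based file loader below is a made-up example, not part of langchain_core.

# Minimal sketch of a custom loader built on the BaseLoader interface above.
from collections.abc import Iterator
from langchain_core.documents import Document

class LineLoader(BaseLoader):
    """Yield one Document per line of a text file (illustrative only)."""

    def __init__(self, path: str) -> None:
        self.path = path

    def lazy_load(self) -> Iterator[Document]:
        with open(self.path, encoding="utf-8") as f:
            for lineno, line in enumerate(f):
                yield Document(
                    page_content=line.rstrip("\n"),
                    metadata={"source": self.path, "line": lineno},
                )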
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import (
Booster,
DataIter,
DMatrix,
ExtMemQuantileDMatrix,
QuantileDMatrix,
_py_version,
build_info,
)
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"QuantileDMatrix",
"ExtMemQuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
"""XGBoost: eXtreme Gradient Boosting library.
Contributors: https://github.com/dmlc/xgboost/blob/master/CONTRIBUTORS.md
"""
from . import tracker # noqa
from . import collective, dask
from .core import Booster, DataIter, DMatrix, QuantileDMatrix, _py_version, build_info
from .tracker import RabitTracker # noqa
from .training import cv, train
try:
from .config import config_context, get_config, set_config
from .plotting import plot_importance, plot_tree, to_graphviz
from .sklearn import (
XGBClassifier,
XGBModel,
XGBRanker,
XGBRegressor,
XGBRFClassifier,
XGBRFRegressor,
)
except ImportError:
pass
__version__ = _py_version()
__all__ = [
# core
"DMatrix",
"QuantileDMatrix",
"Booster",
"DataIter",
"train",
"cv",
# utilities
"RabitTracker",
"build_info",
"plot_importance",
"plot_tree",
"to_graphviz",
"set_config",
"get_config",
"config_context",
# sklearn
"XGBModel",
"XGBClassifier",
"XGBRegressor",
"XGBRanker",
"XGBRFClassifier",
"XGBRFRegressor",
# dask
"dask",
# collective
"collective",
]
|
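A brief sketch (synthetic data, not from the XGBoost sources) exercising a few of the names re-exported above: build a `DMatrix`, train a `Booster` with `train`, and predict.

# Tiny end-to-end sketch of the core API exported by the package above.
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.random((200, 4))
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)

dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train({"objective": "binary:logistic", "max_depth": 3}, dtrain, num_boost_round=10)
print(booster.predict(dtrain)[:5])  # predicted probabilities for the first five rows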
from typing import Any, Callable, Optional, Sequence
from llama_index.core.base.embeddings.base import (
BaseEmbedding,
SimilarityMode,
similarity,
)
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.settings import Settings
class SemanticSimilarityEvaluator(BaseEvaluator):
"""
Embedding similarity evaluator.
Evaluate the quality of a question answering system by
comparing the similarity between embeddings of the generated answer
and the reference answer.
Inspired by this paper:
- Semantic Answer Similarity for Evaluating Question Answering Models
https://arxiv.org/pdf/2108.06130.pdf
Args:
similarity_threshold (float): Embedding similarity threshold for "passing".
Defaults to 0.8.
"""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
similarity_fn: Optional[Callable[..., float]] = None,
similarity_mode: Optional[SimilarityMode] = None,
similarity_threshold: float = 0.8,
) -> None:
self._embed_model = embed_model or Settings.embed_model
if similarity_fn is None:
similarity_mode = similarity_mode or SimilarityMode.DEFAULT
self._similarity_fn = lambda x, y: similarity(x, y, mode=similarity_mode)
else:
if similarity_mode is not None:
raise ValueError(
"Cannot specify both similarity_fn and similarity_mode"
)
self._similarity_fn = similarity_fn
self._similarity_threshold = similarity_threshold
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference: Optional[str] = None,
**kwargs: Any,
) -> EvaluationResult:
del query, contexts, kwargs # Unused
if response is None or reference is None:
raise ValueError("Must specify both response and reference")
response_embedding = await self._embed_model.aget_text_embedding(response)
reference_embedding = await self._embed_model.aget_text_embedding(reference)
similarity_score = self._similarity_fn(response_embedding, reference_embedding)
passing = similarity_score >= self._similarity_threshold
return EvaluationResult(
score=similarity_score,
passing=passing,
feedback=f"Similarity score: {similarity_score}",
)
|
from typing import Any, Callable, Optional, Sequence
from llama_index.core.base.embeddings.base import (
BaseEmbedding,
SimilarityMode,
similarity,
)
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType
from llama_index.core.settings import Settings
class SemanticSimilarityEvaluator(BaseEvaluator):
"""Embedding similarity evaluator.
Evaluate the quality of a question answering system by
comparing the similarity between embeddings of the generated answer
and the reference answer.
Inspired by this paper:
- Semantic Answer Similarity for Evaluating Question Answering Models
https://arxiv.org/pdf/2108.06130.pdf
Args:
similarity_threshold (float): Embedding similarity threshold for "passing".
Defaults to 0.8.
"""
def __init__(
self,
embed_model: Optional[BaseEmbedding] = None,
similarity_fn: Optional[Callable[..., float]] = None,
similarity_mode: Optional[SimilarityMode] = None,
similarity_threshold: float = 0.8,
) -> None:
self._embed_model = embed_model or Settings.embed_model
if similarity_fn is None:
similarity_mode = similarity_mode or SimilarityMode.DEFAULT
self._similarity_fn = lambda x, y: similarity(x, y, mode=similarity_mode)
else:
if similarity_mode is not None:
raise ValueError(
"Cannot specify both similarity_fn and similarity_mode"
)
self._similarity_fn = similarity_fn
self._similarity_threshold = similarity_threshold
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
reference: Optional[str] = None,
**kwargs: Any,
) -> EvaluationResult:
del query, contexts, kwargs # Unused
if response is None or reference is None:
raise ValueError("Must specify both response and reference")
response_embedding = await self._embed_model.aget_text_embedding(response)
reference_embedding = await self._embed_model.aget_text_embedding(reference)
similarity_score = self._similarity_fn(response_embedding, reference_embedding)
passing = similarity_score >= self._similarity_threshold
return EvaluationResult(
score=similarity_score,
passing=passing,
feedback=f"Similarity score: {similarity_score}",
)
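# Illustrative usage sketch (assumption, not part of the original module): embed a
# generated answer and a reference answer, then check whether their similarity
# clears the 0.8 threshold. Assumes an embedding model is configured on Settings
# (or passed explicitly via `embed_model`); the strings below are made up.
import asyncio
evaluator = SemanticSimilarityEvaluator(similarity_threshold=0.8)
result = asyncio.run(
    evaluator.aevaluate(
        response="Paris is the capital of France.",
        reference="The capital city of France is Paris.",
    )
)
print(result.score, result.passing)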
|
from pathlib import Path
from typing import List
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
return AudioCLIPTextEncoder(
model_path=str(Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'),
tokenizer_path=str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
)
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'model_path': str(
Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'
),
'tokenizer_path': str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
},
)
assert ex.batch_size == 32
def test_no_document(basic_encoder: AudioCLIPTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda', download_model=True)
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: AudioCLIPTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: AudioCLIPTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
from typing import List
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
return AudioCLIPTextEncoder(
model_path=str(Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'),
tokenizer_path=str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
)
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'model_path': str(
Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'
),
'tokenizer_path': str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
},
)
assert ex.batch_size == 32
def test_no_document(basic_encoder: AudioCLIPTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: AudioCLIPTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: AudioCLIPTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
decoded_query = model.decode(query_embed)[0]
decoded_document = model.decode(document_embed[0], top_k=100)
for i in range(top_k):
query_token, query_score = decoded_query[i]
doc_score = next((score for token, score in decoded_document if token == query_token), 0)
if doc_score != 0:
print(f"Token: {query_token}, Query score: {query_score:.4f}, Document score: {doc_score:.4f}")
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
import numpy as np
from sentence_transformers import models
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
print("# ------------------------------------------example with v2 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 3
print(f"\nTop tokens {top_k} for each text:")
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-query_embed.to_dense().cpu().numpy())[0][:top_k]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
top_value_query_associate_score = query_embed.to_dense().cpu().numpy()[0, top_indices]
top_value_doc_associate_score = document_embed.to_dense().cpu().numpy()[0, top_indices]
for i in range(top_k):
if top_value_doc_associate_score[i] != 0:
print(
f"Token: {top_tokens[i]}, "
f"Query score: {top_value_query_associate_score[i]:.4f}, "
f"Document score: {top_value_doc_associate_score[i]:.4f}"
)
"""
# ------------------------------------------example with v2 distill-----------------------------------------
Similarity: tensor([[17.5307]], device='cuda:0')
Top tokens 3 for each text:
Token: ny, Query score: 5.7729, Document score: 1.4109
Token: weather, Query score: 4.5684, Document score: 1.4673
Token: now, Query score: 3.5895, Document score: 0.7473
"""
print("# -----------------------------------------example with v3 distill-----------------------------------------")
doc_encoder = MLMTransformer("opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill")
asym = models.Asym(
{
"query": [
IDF.from_json(
"opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill",
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling(pooling_strategy="max", activation_function="log1p_relu"),
],
}
)
model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
query = "What's the weather in ny now?"
document = "Currently New York is rainy."
query_embed = model.encode([{"query": query}])
document_embed = model.encode([{"doc": document}])
sim = model.similarity(query_embed, document_embed)
print(f"Similarity: {sim}")
# Visualize top tokens for each text
top_k = 10
print(f"\nTop tokens {top_k} for each text:")
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-query_embed.to_dense().cpu().numpy())[0][:top_k]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
top_value_query_associate_score = query_embed.to_dense().cpu().numpy()[0, top_indices]
top_value_doc_associate_score = document_embed.to_dense().cpu().numpy()[0, top_indices]
for i in range(top_k):
if top_value_doc_associate_score[i] != 0:
print(
f"Token: {top_tokens[i]}, "
f"Query score: {top_value_query_associate_score[i]:.4f}, "
f"Document score: {top_value_doc_associate_score[i]:.4f}"
)
"""
# -----------------------------------------example with v3 distill-----------------------------------------
Similarity: tensor([[11.1105]], device='cuda:0')
Top tokens 10 for each text:
Token: ny, Query score: 5.7729, Document score: 0.8049
Token: weather, Query score: 4.5684, Document score: 0.9710
Token: now, Query score: 3.5895, Document score: 0.4720
Token: ?, Query score: 3.3313, Document score: 0.0286
Token: what, Query score: 2.7699, Document score: 0.0787
Token: in, Query score: 0.4989, Document score: 0.0417
"""
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode
pytestmark = pytest.mark.skipif(
tm.no_arrow()["condition"] or tm.no_pandas()["condition"],
reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"],
)
import pandas as pd
import pyarrow as pa
import pyarrow.csv as pc
class TestArrowTable:
def test_arrow_table(self):
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table)
assert dm.num_row() == 2
assert dm.num_col() == 4
def test_arrow_table_with_label(self):
df = pd.DataFrame([[1, 2.0, 3.0], [2, 3.0, 4.0]], columns=["a", "b", "c"])
table = pa.Table.from_pandas(df)
label = np.array([0, 1])
dm = xgb.DMatrix(table)
dm.set_label(label)
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([0, 1]))
def test_arrow_table_from_np(self):
coldata = np.array(
[[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]]
)
cols = list(map(pa.array, coldata))
table = pa.Table.from_arrays(cols, ["a", "b", "c"])
dm = xgb.DMatrix(table)
assert dm.num_row() == 4
assert dm.num_col() == 3
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_arrow_train(self, DMatrixT):
import pandas as pd
rows = 100
X = pd.DataFrame(
{
"A": np.random.randint(0, 10, size=rows),
"B": np.random.randn(rows),
"C": np.random.permutation([1, 0] * (rows // 2)),
}
)
y = pd.Series(np.random.randn(rows))
table = pa.Table.from_pandas(X)
dtrain1 = DMatrixT(table)
dtrain1.set_label(pa.Table.from_pandas(pd.DataFrame(y)))
bst1 = xgb.train({}, dtrain1, num_boost_round=10)
preds1 = bst1.predict(DMatrixT(X))
dtrain2 = DMatrixT(X, y)
bst2 = xgb.train({}, dtrain2, num_boost_round=10)
preds2 = bst2.predict(DMatrixT(X))
np.testing.assert_allclose(preds1, preds2)
preds3 = bst2.inplace_predict(table)
np.testing.assert_allclose(preds1, preds3)
assert bst2.feature_names == ["A", "B", "C"]
assert bst2.feature_types == ["int", "float", "int"]
def test_arrow_survival(self):
data = os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")
table = pc.read_csv(data)
y_lower_bound = table["Survival_label_lower_bound"]
y_upper_bound = table["Survival_label_upper_bound"]
X = table.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"])
dtrain = xgb.DMatrix(
X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
y_np_up = dtrain.get_float_info("label_upper_bound")
y_np_low = dtrain.get_float_info("label_lower_bound")
np.testing.assert_equal(y_np_up, y_upper_bound.to_pandas().values)
np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)
@pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
class TestArrowTableColumnSplit:
def test_arrow_table(self):
def verify_arrow_table():
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table, data_split_mode=DataSplitMode.COL)
assert dm.num_row() == 2
assert dm.num_col() == 4 * xgb.collective.get_world_size()
tm.run_with_rabit(world_size=3, test_fn=verify_arrow_table)
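# Illustrative minimal sketch (assumption, not part of the test file): a pyarrow
# Table can be passed straight to DMatrix together with a NumPy label array.
def _arrow_train_sketch():
    rows = 8
    table = pa.table({"f0": np.random.randn(rows), "f1": np.random.randn(rows)})
    dtrain = xgb.DMatrix(table, label=np.random.randint(0, 2, size=rows))
    return xgb.train({"objective": "binary:logistic"}, dtrain, num_boost_round=2)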
|
import os
import sys
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.core import DataSplitMode
try:
import pandas as pd
import pyarrow as pa
import pyarrow.csv as pc
except ImportError:
pass
pytestmark = pytest.mark.skipif(
tm.no_arrow()["condition"] or tm.no_pandas()["condition"],
reason=tm.no_arrow()["reason"] + " or " + tm.no_pandas()["reason"],
)
dpath = "demo/data/"
class TestArrowTable:
def test_arrow_table(self):
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table)
assert dm.num_row() == 2
assert dm.num_col() == 4
def test_arrow_table_with_label(self):
df = pd.DataFrame([[1, 2.0, 3.0], [2, 3.0, 4.0]], columns=["a", "b", "c"])
table = pa.Table.from_pandas(df)
label = np.array([0, 1])
dm = xgb.DMatrix(table)
dm.set_label(label)
assert dm.num_row() == 2
assert dm.num_col() == 3
np.testing.assert_array_equal(dm.get_label(), np.array([0, 1]))
def test_arrow_table_from_np(self):
coldata = np.array(
[[1.0, 1.0, 0.0, 0.0], [2.0, 0.0, 1.0, 0.0], [3.0, 0.0, 0.0, 1.0]]
)
cols = list(map(pa.array, coldata))
table = pa.Table.from_arrays(cols, ["a", "b", "c"])
dm = xgb.DMatrix(table)
assert dm.num_row() == 4
assert dm.num_col() == 3
@pytest.mark.parametrize("DMatrixT", [xgb.DMatrix, xgb.QuantileDMatrix])
def test_arrow_train(self, DMatrixT):
import pandas as pd
rows = 100
X = pd.DataFrame(
{
"A": np.random.randint(0, 10, size=rows),
"B": np.random.randn(rows),
"C": np.random.permutation([1, 0] * (rows // 2)),
}
)
y = pd.Series(np.random.randn(rows))
table = pa.Table.from_pandas(X)
dtrain1 = DMatrixT(table)
dtrain1.set_label(pa.Table.from_pandas(pd.DataFrame(y)))
bst1 = xgb.train({}, dtrain1, num_boost_round=10)
preds1 = bst1.predict(DMatrixT(X))
dtrain2 = DMatrixT(X, y)
bst2 = xgb.train({}, dtrain2, num_boost_round=10)
preds2 = bst2.predict(DMatrixT(X))
np.testing.assert_allclose(preds1, preds2)
preds3 = bst2.inplace_predict(table)
np.testing.assert_allclose(preds1, preds3)
assert bst2.feature_names == ["A", "B", "C"]
assert bst2.feature_types == ["int", "float", "int"]
def test_arrow_survival(self):
data = os.path.join(tm.data_dir(__file__), "veterans_lung_cancer.csv")
table = pc.read_csv(data)
y_lower_bound = table["Survival_label_lower_bound"]
y_upper_bound = table["Survival_label_upper_bound"]
X = table.drop(["Survival_label_lower_bound", "Survival_label_upper_bound"])
dtrain = xgb.DMatrix(
X, label_lower_bound=y_lower_bound, label_upper_bound=y_upper_bound
)
y_np_up = dtrain.get_float_info("label_upper_bound")
y_np_low = dtrain.get_float_info("label_lower_bound")
np.testing.assert_equal(y_np_up, y_upper_bound.to_pandas().values)
np.testing.assert_equal(y_np_low, y_lower_bound.to_pandas().values)
@pytest.mark.skipif(tm.is_windows(), reason="Rabit does not run on windows")
class TestArrowTableColumnSplit:
def test_arrow_table(self):
def verify_arrow_table():
df = pd.DataFrame(
[[0, 1, 2.0, 3.0], [1, 2, 3.0, 4.0]], columns=["a", "b", "c", "d"]
)
table = pa.Table.from_pandas(df)
dm = xgb.DMatrix(table, data_split_mode=DataSplitMode.COL)
assert dm.num_row() == 2
assert dm.num_col() == 4 * xgb.collective.get_world_size()
tm.run_with_rabit(world_size=3, test_fn=verify_arrow_table)
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all datapoints to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (datapoints.Datapoint,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
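# Illustrative usage sketch (assumption, not part of the original module):
# round-trip an image through the conversion transforms defined above.
def _conversion_round_trip() -> PIL.Image.Image:
    img = PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8))
    as_tensor = PILToTensor()(img)   # uint8 tensor of shape (3, 32, 32), values unchanged
    as_image = ToImage()(as_tensor)  # datapoints.Image wrapper, values unchanged
    return ToPILImage()(as_image)    # back to a PIL.Image, value range preserved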
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.datapoints.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image - this does not scale values.
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while preserving the value range.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
- If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
|
import datetime
import prisma.fields
import prisma.models
import pytest
import backend.server.v2.library.model as library_model
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentGraphId="agent-123",
agentGraphVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput(
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=prisma.Json({"type": "string", "value": "test value"}),
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.graph_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
import datetime
import prisma.fields
import prisma.models
import pytest
import backend.server.v2.library.model as library_model
from backend.util import json
@pytest.mark.asyncio
async def test_agent_preset_from_db():
# Create mock DB agent
db_agent = prisma.models.AgentPreset(
id="test-agent-123",
createdAt=datetime.datetime.now(),
updatedAt=datetime.datetime.now(),
agentId="agent-123",
agentVersion=1,
name="Test Agent",
description="Test agent description",
isActive=True,
userId="test-user-123",
isDeleted=False,
InputPresets=[
prisma.models.AgentNodeExecutionInputOutput(
id="input-123",
time=datetime.datetime.now(),
name="input1",
data=json.dumps({"type": "string", "value": "test value"}), # type: ignore
)
],
)
# Convert to LibraryAgentPreset
agent = library_model.LibraryAgentPreset.from_db(db_agent)
assert agent.id == "test-agent-123"
assert agent.agent_version == 1
assert agent.is_active is True
assert agent.name == "Test Agent"
assert agent.description == "Test agent description"
assert agent.inputs == {"input1": {"type": "string", "value": "test value"}}
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from ..registry import HOOKS
from ..utils import get_git_hash
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_run(self, runner) -> None:
import mmengine
metainfo = dict(
cfg=runner.cfg.pretty_text,
seed=runner.seed,
experiment_name=runner.experiment_name,
mmengine_version=mmengine.__version__ + get_git_hash())
runner.message_hub.update_info_dict(metainfo)
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
runner.message_hub.update_info(
'dataset_meta', runner.train_dataloader.dataset.metainfo)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
        assert isinstance(lr_dict, dict), (
            '`runner.optim_wrapper.get_lr()` should return a dict '
            'of learning rates when training with OptimWrapper (single '
            'optimizer) or OptimWrapperDict (multiple optimizers), '
            f'but got {type(lr_dict)}. Please check that your optimizer '
            'constructor returns an `OptimWrapper` or `OptimWrapperDict` '
            'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs.items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
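# Illustrative sketch (assumption, not part of the original hook): any component
# can read the runtime information that this hook publishes via the message hub.
def _read_runtime_info_sketch():
    from mmengine.logging import MessageHub
    message_hub = MessageHub.get_current_instance()
    current_iter = message_hub.get_info('iter')
    latest_loss = message_hub.get_scalar('train/loss').current()
    return current_iter, latest_loss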
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class RuntimeInfoHook(Hook):
"""A hook that updates runtime information into message hub.
E.g. ``epoch``, ``iter``, ``max_epochs``, and ``max_iters`` for the
training state. Components that cannot access the runner can get runtime
information through the message hub.
"""
priority = 'VERY_HIGH'
def before_train(self, runner) -> None:
"""Update resumed training state."""
runner.message_hub.update_info('epoch', runner.epoch)
runner.message_hub.update_info('iter', runner.iter)
runner.message_hub.update_info('max_epochs', runner.max_epochs)
runner.message_hub.update_info('max_iters', runner.max_iters)
def before_train_epoch(self, runner) -> None:
"""Update current epoch information before every epoch."""
runner.message_hub.update_info('epoch', runner.epoch)
def before_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None) -> None:
"""Update current iter and learning rate information before every
iteration."""
runner.message_hub.update_info('iter', runner.iter)
lr_dict = runner.optim_wrapper.get_lr()
        assert isinstance(lr_dict, dict), (
            '`runner.optim_wrapper.get_lr()` should return a dict '
            'of learning rates when training with OptimWrapper (single '
            'optimizer) or OptimWrapperDict (multiple optimizers), '
            f'but got {type(lr_dict)}. Please check that your optimizer '
            'constructor returns an `OptimWrapper` or `OptimWrapperDict` '
            'instance')
for name, lr in lr_dict.items():
runner.message_hub.update_scalar(f'train/{name}', lr[0])
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ``log_vars`` in model outputs every iteration."""
if outputs is not None:
for key, value in outputs.items():
runner.message_hub.update_scalar(f'train/{key}', value)
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each validation epoch.
Args:
runner (Runner): The runner of the validation process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on validation dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'val/{key}', value)
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""All subclasses should override this method, if they need any
operations after each test epoch.
Args:
runner (Runner): The runner of the testing process.
metrics (Dict[str, float], optional): Evaluation results of all
metrics on test dataset. The keys are the names of the
metrics, and the values are corresponding results.
"""
if metrics is not None:
for key, value in metrics.items():
runner.message_hub.update_scalar(f'test/{key}', value)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'--volumes=.cache:/workdir/.cache',
],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule
from .utils import detect_anomalous_params, merge_dict, stack_batch
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'StochasticWeightAverage',
'ExponentialMovingAverage', 'MomentumAnnealingEMA', 'BaseModel',
'BaseDataPreprocessor', 'ImgDataPreprocessor',
'MMSeparateDistributedDataParallel', 'BaseModule', 'stack_batch',
'merge_dict', 'detect_anomalous_params'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .averaged_model import (ExponentialMovingAverage, MomentumAnnealingEMA,
StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule
from .utils import detect_anomalous_params, merge_dict, stach_batch_imgs
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'StochasticWeightAverage',
'ExponentialMovingAverage', 'MomentumAnnealingEMA', 'BaseModel',
'BaseDataPreprocessor', 'ImgDataPreprocessor',
'MMSeparateDistributedDataParallel', 'BaseModule', 'stach_batch_imgs',
'merge_dict', 'detect_anomalous_params'
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
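# Illustrative note (assumption, not part of the original conftest): the non-autouse
# fixture above is opted into by naming it as a test argument, e.g.
#     def test_to_sql(set_sqlalchemy_silence_uber_warning):
#         ...  # exercises SQL export without the SQLAlchemy 2.0 deprecation warning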
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why doesn't a cache dir per test function work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
AutoencoderKL,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
load_hf_numpy,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_accelerator
class AutoencoderKLSingleFileTests(unittest.TestCase):
model_class = AutoencoderKL
ckpt_path = (
"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
)
repo_id = "stabilityai/sd-vae-ft-mse"
main_input_name = "sample"
base_precision = 1e-2
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image
def test_single_file_inference_same_as_pretrained(self):
model_1 = self.model_class.from_pretrained(self.repo_id).to(torch_device)
model_2 = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id).to(torch_device)
image = self.get_sd_image(33)
generator = torch.Generator(torch_device)
with torch.no_grad():
sample_1 = model_1(image, generator=generator.manual_seed(0)).sample
sample_2 = model_2(image, generator=generator.manual_seed(0)).sample
assert sample_1.shape == sample_2.shape
output_slice_1 = sample_1.flatten().float().cpu()
output_slice_2 = sample_2.flatten().float().cpu()
assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
assert model_default.config.scaling_factor == 0.18215
assert model_default.config.sample_size == 256
assert model_default.dtype == torch.float32
scaling_factor = 2.0
sample_size = 512
torch_dtype = torch.float16
model = self.model_class.from_single_file(
self.ckpt_path,
config=self.repo_id,
sample_size=sample_size,
scaling_factor=scaling_factor,
torch_dtype=torch_dtype,
)
assert model.config.scaling_factor == scaling_factor
assert model.config.sample_size == sample_size
assert model.dtype == torch_dtype
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
AutoencoderKL,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_hf_numpy,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
enable_full_determinism()
@slow
@require_torch_gpu
class AutoencoderKLSingleFileTests(unittest.TestCase):
model_class = AutoencoderKL
ckpt_path = (
"https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"
)
repo_id = "stabilityai/sd-vae-ft-mse"
main_input_name = "sample"
base_precision = 1e-2
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_file_format(self, seed, shape):
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
dtype = torch.float16 if fp16 else torch.float32
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
return image
def test_single_file_inference_same_as_pretrained(self):
model_1 = self.model_class.from_pretrained(self.repo_id).to(torch_device)
model_2 = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id).to(torch_device)
image = self.get_sd_image(33)
generator = torch.Generator(torch_device)
with torch.no_grad():
sample_1 = model_1(image, generator=generator.manual_seed(0)).sample
sample_2 = model_2(image, generator=generator.manual_seed(0)).sample
assert sample_1.shape == sample_2.shape
output_slice_1 = sample_1.flatten().float().cpu()
output_slice_2 = sample_2.flatten().float().cpu()
assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id)
model_single_file = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_arguments(self):
model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id)
assert model_default.config.scaling_factor == 0.18215
assert model_default.config.sample_size == 256
assert model_default.dtype == torch.float32
scaling_factor = 2.0
sample_size = 512
torch_dtype = torch.float16
model = self.model_class.from_single_file(
self.ckpt_path,
config=self.repo_id,
sample_size=sample_size,
scaling_factor=scaling_factor,
torch_dtype=torch_dtype,
)
assert model.config.scaling_factor == scaling_factor
assert model.config.sample_size == sample_size
assert model.dtype == torch_dtype
|
from typing import Union, BinaryIO, TYPE_CHECKING
from docarray.document.mixins.helper import _uri_to_blob, _get_file_context
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class UriFileMixin:
"""Provide helper functions for :class:`Document` to dump content to a file."""
def save_uri_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.uri` into a file
:param file: File or filename to which the data is saved.
        :return: itself after being processed
"""
fp = _get_file_context(file)
with fp:
blob = _uri_to_blob(self.uri)
fp.write(blob)
return self
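# Illustrative usage sketch (assumption, not part of the original mixin):
# `Document` mixes this class in, so a document carrying a `uri` can be dumped to disk.
def _save_uri_sketch() -> None:
    from docarray import Document
    d = Document(uri='https://example.com/logo.png')  # hypothetical URI
    d.save_uri_to_file('logo.png')  # fetches the URI content and writes it locally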
|
from typing import Union, BinaryIO, TYPE_CHECKING
from docarray.document.mixins.helper import _uri_to_blob, _get_file_context
if TYPE_CHECKING:
from docarray.typing import T
class UriFileMixin:
"""Provide helper functions for :class:`Document` to dump content to a file."""
def save_uri_to_file(self: 'T', file: Union[str, BinaryIO]) -> 'T':
"""Save :attr:`.uri` into a file
:param file: File or filename to which the data is saved.
        :return: itself after being processed
"""
fp = _get_file_context(file)
with fp:
blob = _uri_to_blob(self.uri)
fp.write(blob)
return self
|
from __future__ import annotations
from typing import Any
import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer
class MLMTransformer(nn.Module):
"""A minimal Transformer model that uses MLM (Masked Language Modeling).
This model implements only the essential functionality needed for MLM,
without inheriting from the base Transformer class.
Args:
model_name_or_path: Hugging Face models name
max_seq_length: Truncate any inputs longer than max_seq_length
model_args: Keyword arguments passed to the Hugging Face Transformers model
tokenizer_args: Keyword arguments passed to the Hugging Face Transformers tokenizer
config_args: Keyword arguments passed to the Hugging Face Transformers config
cache_dir: Cache dir for Hugging Face Transformers to store/load models
do_lower_case: If true, lowercases the input
tokenizer_name_or_path: Name or path of the tokenizer
"""
def __init__(
self,
model_name_or_path: str,
max_seq_length: int | None = None,
model_args: dict[str, Any] | None = None,
tokenizer_args: dict[str, Any] | None = None,
config_args: dict[str, Any] | None = None,
cache_dir: str | None = None,
do_lower_case: bool = False,
tokenizer_name_or_path: str | None = None,
) -> None:
super().__init__()
# Set default values for optional arguments
if model_args is None:
model_args = {}
if tokenizer_args is None:
tokenizer_args = {}
if config_args is None:
config_args = {}
# Load config
self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir, **config_args)
# Load tokenizer
if max_seq_length is not None and "model_max_length" not in tokenizer_args:
tokenizer_args["model_max_length"] = max_seq_length
self.tokenizer = AutoTokenizer.from_pretrained(
(tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path),
cache_dir=cache_dir,
**tokenizer_args,
)
# Set max_seq_length
self.max_seq_length = max_seq_length
if max_seq_length is None:
if hasattr(self.config, "max_position_embeddings") and hasattr(self.tokenizer, "model_max_length"):
self.max_seq_length = min(self.config.max_position_embeddings, self.tokenizer.model_max_length)
# Load MLM model
self.auto_model = AutoModelForMaskedLM.from_pretrained(
model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args
)
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features
Returns:
Dictionary containing token embeddings and MLM logits
"""
# Get MLM outputs
mlm_outputs = self.auto_model(**features)
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = mlm_outputs.logits
return {"mlm_logits": mlm_logits}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the token embeddings"""
return self.auto_model.config.hidden_size
def tokenize(self, texts: list[str], padding: bool = True) -> dict[str, torch.Tensor]:
"""Tokenize the input texts.
Args:
texts: List of texts to tokenize
padding: Whether to pad the sequences
Returns:
Dictionary containing tokenized inputs
"""
# Check if the model is a DistilBERT model
is_distilbert = "distilbert" in self.auto_model.config.model_type.lower()
# For DistilBERT models, we need to exclude token_type_ids
if is_distilbert:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
return_token_type_ids=False, # Exclude token_type_ids for DistilBERT
)
else:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
)
|
from __future__ import annotations
from typing import Any
import torch
from torch import nn
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer
class MLMTransformer(nn.Module):
"""A minimal Transformer model that uses MLM (Masked Language Modeling).
This model implements only the essential functionality needed for MLM,
without inheriting from the base Transformer class.
Args:
model_name_or_path: Hugging Face model name or path
max_seq_length: Truncate any inputs longer than max_seq_length
model_args: Keyword arguments passed to the Hugging Face Transformers model
tokenizer_args: Keyword arguments passed to the Hugging Face Transformers tokenizer
config_args: Keyword arguments passed to the Hugging Face Transformers config
cache_dir: Cache dir for Hugging Face Transformers to store/load models
do_lower_case: If true, lowercases the input
tokenizer_name_or_path: Name or path of the tokenizer
"""
def __init__(
self,
model_name_or_path: str,
max_seq_length: int | None = None,
model_args: dict[str, Any] | None = None,
tokenizer_args: dict[str, Any] | None = None,
config_args: dict[str, Any] | None = None,
cache_dir: str | None = None,
do_lower_case: bool = False,
tokenizer_name_or_path: str | None = None,
) -> None:
super().__init__()
# Set default values for optional arguments
if model_args is None:
model_args = {}
if tokenizer_args is None:
tokenizer_args = {}
if config_args is None:
config_args = {}
# Load config
self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir, **config_args)
# Load tokenizer
if max_seq_length is not None and "model_max_length" not in tokenizer_args:
tokenizer_args["model_max_length"] = max_seq_length
self.tokenizer = AutoTokenizer.from_pretrained(
(tokenizer_name_or_path if tokenizer_name_or_path is not None else model_name_or_path),
cache_dir=cache_dir,
**tokenizer_args,
)
# Set max_seq_length
self.max_seq_length = max_seq_length
if max_seq_length is None:
if hasattr(self.config, "max_position_embeddings") and hasattr(self.tokenizer, "model_max_length"):
self.max_seq_length = min(self.config.max_position_embeddings, self.tokenizer.model_max_length)
# Load MLM model
self.auto_model = AutoModelForMaskedLM.from_pretrained(
model_name_or_path, config=self.config, cache_dir=cache_dir, **model_args
)
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
features: Dictionary containing input features
Returns:
Dictionary containing token embeddings and MLM logits
"""
# Get MLM outputs
mlm_outputs = self.auto_model(**features, output_hidden_states=True)
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = mlm_outputs.logits
return {"mlm_logits": mlm_logits}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the token embeddings"""
return self.auto_model.config.hidden_size
def tokenize(self, texts: list[str], padding: bool = True) -> dict[str, torch.Tensor]:
"""Tokenize the input texts.
Args:
texts: List of texts to tokenize
padding: Whether to pad the sequences
Returns:
Dictionary containing tokenized inputs
"""
# Check if the model is a DistilBERT model
is_distilbert = "distilbert" in self.auto_model.config.model_type.lower()
# For DistilBERT models, we need to exclude token_type_ids
if is_distilbert:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
return_token_type_ids=False, # Exclude token_type_ids for DistilBERT
)
else:
return self.tokenizer(
texts,
padding=padding,
truncation=True,
max_length=self.max_seq_length,
return_tensors="pt",
)
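# Usage sketch (illustrative): the checkpoint name is only an example; any Hugging Face
# masked-language-model checkpoint should work the same way.
mlm = MLMTransformer("distilbert-base-uncased", max_seq_length=64)
features = mlm.tokenize(["The capital of France is [MASK]."])
logits = mlm(features)["mlm_logits"]
print(logits.shape)  # (batch_size, seq_length, vocab_size)
print(mlm.get_sentence_embedding_dimension())  # hidden size of the backbone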
|
from . import InputExample
import gzip
import os
class NLIDataReader(object):
"""Reads in the Stanford NLI dataset and the MultiGenre NLI dataset"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
The ``filename`` argument specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
|
from . import InputExample
import gzip
import os
class NLIDataReader(object):
"""
Reads in the Stanford NLI dataset and the MultiGenre NLI dataset
"""
def __init__(self, dataset_folder):
self.dataset_folder = dataset_folder
def get_examples(self, filename, max_examples=0):
"""
The ``filename`` argument specifies which data split to use (train, dev, test).
Expects that self.dataset_folder contains the files s1.$data_split.gz, s2.$data_split.gz,
labels.$data_split.gz, e.g., for the train split, s1.train.gz, s2.train.gz, labels.train.gz
"""
s1 = gzip.open(os.path.join(self.dataset_folder, "s1." + filename), mode="rt", encoding="utf-8").readlines()
s2 = gzip.open(os.path.join(self.dataset_folder, "s2." + filename), mode="rt", encoding="utf-8").readlines()
labels = gzip.open(
os.path.join(self.dataset_folder, "labels." + filename), mode="rt", encoding="utf-8"
).readlines()
examples = []
id = 0
for sentence_a, sentence_b, label in zip(s1, s2, labels):
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence_a, sentence_b], label=self.map_label(label)))
if 0 < max_examples <= len(examples):
break
return examples
@staticmethod
def get_labels():
return {"contradiction": 0, "entailment": 1, "neutral": 2}
def get_num_labels(self):
return len(self.get_labels())
def map_label(self, label):
return self.get_labels()[label.strip().lower()]
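# Usage sketch (illustrative): assumes a folder laid out as the docstring above describes,
# e.g. AllNLI/s1.train.gz, AllNLI/s2.train.gz, AllNLI/labels.train.gz.
reader = NLIDataReader('AllNLI')
train_examples = reader.get_examples('train.gz', max_examples=1000)
print(len(train_examples), reader.get_num_labels())  # -> 1000 3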
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
def test_parametrized_subclass():
c1 = TorchTensor[128]
c2 = TorchTensor[128]
assert issubclass(c1, c2)
assert issubclass(c1, TorchTensor)
assert issubclass(c1, torch.Tensor)
assert not issubclass(c1, TorchTensor[256])
def test_parametrized_instance():
t = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(t, TorchTensor[128])
assert isinstance(t, TorchTensor)
assert isinstance(t, torch.Tensor)
assert not isinstance(t, TorchTensor[256])
def test_parametrized_equality():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t3 = parse_obj_as(TorchTensor[256], torch.zeros(256))
assert (t1 == t2).all()
assert not t1 == t3
def test_parametrized_operations():
t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
t_result = t1 + t2
assert isinstance(t_result, torch.Tensor)
assert isinstance(t_result, TorchTensor)
assert isinstance(t_result, TorchTensor[128])
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
tensor._to_node_protobuf()
def test_json_schema():
schema_json_of(TorchTensor)
def test_dump_json():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
orjson_dumps(tensor)
def test_unwrap():
tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
ndarray = tensor.unwrap()
assert not isinstance(ndarray, TorchTensor)
assert isinstance(tensor, TorchTensor)
assert isinstance(ndarray, torch.Tensor)
assert tensor.data_ptr() == ndarray.data_ptr()
assert (ndarray == torch.zeros(3, 224, 224)).all()
def test_parametrized():
# correct shape, single axis
tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# correct shape, multiple axis
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong but reshapable shape
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
assert isinstance(tensor, TorchTensor)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (3, 224, 224)
# wrong and not reshapable shape
with pytest.raises(ValueError):
parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))
@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(shape))
assert tensor.__class__.__name__ == 'TorchTensor[3, 224, 224]'
assert tensor.__class__.__qualname__ == 'TorchTensor[3, 224, 224]'
assert f'{tensor[0][0][0]}' == 'TorchTensor[3, 224, 224](0.)'
def test_torch_embedding():
# correct shape
tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
assert isinstance(tensor, TorchEmbedding)
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == (128,)
# wrong shape at data setting time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128], torch.zeros(256))
# illegal shape at class creation time
with pytest.raises(ValueError):
parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
|
_base_ = [
'../_base_/models/cascade-mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_train.json',
data_prefix=dict(img=''),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/lvis_v1_val.json',
data_prefix=dict(img='')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='LVISMetric',
ann_file=data_root + 'annotations/lvis_v1_val.json',
metric=['bbox', 'segm'])
test_evaluator = val_evaluator
train_cfg = dict(val_interval=24)
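# Inspection sketch (the path is hypothetical): when the config file is loaded, the
# `_base_` entries are merged and `{{_base_....}}` references are resolved.
from mmengine.config import Config
cfg = Config.fromfile('configs/lvis/cascade-mask-rcnn_r101_fpn_seesaw-loss_2x_lvis-v1.py')
print(cfg.model.roi_head.mask_head.num_classes)  # 1203 classes for LVIS v1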
|
import types
from typing_extensions import TYPE_CHECKING
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.image.image_tensorflow_tensor import ( # noqa
ImageTensorFlowTensor,
)
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor # noqa
__all__ = ['ImageNdArray', 'ImageTensor']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'ImageTorchTensor':
import_library('torch', raise_error=True)
import docarray.typing.tensor.image.image_torch_tensor as lib
elif name == 'ImageTensorFlowTensor':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.image.image_tensorflow_tensor as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
from docarray.typing.tensor.image.image_ndarray import ImageNdArray
from docarray.typing.tensor.image.image_tensor import ImageTensor
__all__ = ['ImageNdArray', 'ImageTensor']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.image.image_torch_tensor import ImageTorchTensor # noqa
__all__.extend(['ImageTorchTensor'])
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.image.image_tensorflow_tensor import ( # noqa
ImageTensorFlowTensor,
)
__all__.extend(['ImageTensorFlowTensor'])
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean={{_base_.model.data_preprocessor.mean}},
to_rgb={{_base_.model.data_preprocessor.bgr_to_rgb}},
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
input_size = 300
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=[123.675, 116.28, 103.53],
to_rgb=True,
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(type='RandomFlip', prob=0.5),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(input_size, input_size), keep_ratio=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=8,
num_workers=2,
batch_sampler=None,
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=5,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=8,
num_workers=2,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4))
custom_hooks = [
dict(type='NumClassCheckHook'),
dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]
# TODO support auto_scale_lr
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.2.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import mmengine
from mmengine.utils import digit_version
from .version import __version__, version_info
mmcv_minimum_version = '2.0.0rc4'
mmcv_maximum_version = '2.1.0'
mmcv_version = digit_version(mmcv.__version__)
mmengine_minimum_version = '0.7.1'
mmengine_maximum_version = '1.0.0'
mmengine_version = digit_version(mmengine.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version < digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.'
assert (mmengine_version >= digit_version(mmengine_minimum_version)
and mmengine_version < digit_version(mmengine_maximum_version)), \
f'MMEngine=={mmengine.__version__} is used but incompatible. ' \
f'Please install mmengine>={mmengine_minimum_version}, ' \
f'<{mmengine_maximum_version}.'
__all__ = ['__version__', 'version_info', 'digit_version']
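# Ordering sketch: the version guards above rely on release candidates sorting below the
# corresponding release, so a '2.0.0rcX' install still satisfies the lower bound.
from mmengine.utils import digit_version
assert digit_version('2.0.0rc4') < digit_version('2.0.0') < digit_version('2.1.0')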
|
_base_ = ['./mask2former_swin-b-p4-w12-384_8xb2-lsj-50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
|
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway # keep import here for backwards compatibility
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.http import HTTPServer
__all__ = ['HTTPGateway']
class HTTPGateway(HTTPServer, BaseGateway):
"""
:class:`HTTPGateway` is a Gateway that serves the default FastAPI app over an HTTP server
"""
pass
|
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
__all__ = ['HTTPGateway']
class HTTPGateway(FastAPIBaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
@property
def app(self):
"""Get the default base API app for HTTPGateway
:return: Return a FastAPI app for the default HTTPGateway
"""
return self._request_handler._http_fastapi_default_app(title=self.title,
description=self.description,
no_crud_endpoints=self.no_crud_endpoints,
no_debug_endpoints=self.no_debug_endpoints,
expose_endpoints=self.expose_endpoints,
expose_graphql_endpoint=self.expose_graphql_endpoint,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
cors=self.cors)
|
"""Argparser module for WorkerRuntime"""
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used in Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The custom Python modules that need to be imported before loading the executor.
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
mixin_base_runtime_parser(gp)
|
"""Argparser module for WorkerRuntime"""
from jina import __default_host__, helper
from jina.enums import PollingType
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.runtimes.runtime import mixin_base_runtime_parser
def mixin_worker_runtime_parser(parser):
"""Mixing in arguments required by :class:`WorkerRuntime` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='WorkerRuntime')
from jina import __default_executor__
gp.add_argument(
'--uses',
type=str,
default=__default_executor__,
help='''
The config of the executor; it can be one of the following:
* the string literal of an Executor class name
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used in Python, the following values are additionally supported:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
''',
)
gp.add_argument(
'--uses-with',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `with` configuration in `uses`
''',
)
gp.add_argument(
'--uses-metas',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `metas` configuration in `uses`
''',
)
gp.add_argument(
'--uses-requests',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='''
Dictionary of keyword arguments that will override the `requests` configuration in `uses`
''',
)
gp.add_argument(
'--py-modules',
type=str,
nargs='*',
metavar='PATH',
help='''
The custom Python modules that need to be imported before loading the executor.
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/executor-files/>`__
''',
)
gp.add_argument(
'--output-array-type',
type=str,
default=None,
help='''
The type of array `tensor` and `embedding` will be serialized to.
Supports the same types as `docarray.to_protobuf(.., ndarray_type=...)`, which can be found
`here <https://docarray.jina.ai/fundamentals/document/serialization/#from-to-protobuf>`.
Defaults to retaining whatever type is returned by the Executor.
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
mixin_base_runtime_parser(gp)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SendEvent
from langchain_community.tools.office365.send_event import SendEventSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendEventSchema": "langchain_community.tools.office365.send_event",
"O365SendEvent": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365SendEvent",
"SendEventSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import O365SendEvent
from langchain_community.tools.office365.send_event import SendEventSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"SendEventSchema": "langchain_community.tools.office365.send_event",
"O365SendEvent": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SendEventSchema",
"O365SendEvent",
]
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
data_preprocessor=dict(batch_augments=[
dict(
type='BatchSyncRandomResize',
random_size_range=(320, 640),
size_divisor=32,
interval=10)
]),
backbone=dict(deepen_factor=0.33, widen_factor=0.375),
neck=dict(in_channels=[96, 192, 384], out_channels=96),
bbox_head=dict(in_channels=96, feat_channels=96))
img_scale = (640, 640) # height, width
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.5, 1.5),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
# Resize and Pad are for the last 15 epochs when Mosaic and
# RandomAffine are closed by YOLOXModeSwitchHook.
dict(type='Resize', scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(416, 416), keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
# auto_scale_lr = dict(base_batch_size=64)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import YOLOFHead
class TestYOLOFHead(TestCase):
def test_yolof_head_loss(self):
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
yolof_head = YOLOFHead(
num_classes=4,
in_channels=1,
feat_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = yolof_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import Config
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import YOLOFHead
class TestYOLOFHead(TestCase):
def test_yolof_head_loss(self):
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
yolof_head = YOLOFHead(
num_classes=4,
in_channels=1,
feat_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = yolof_head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_box_loss.item(), 0,
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = yolof_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'box loss should be non-zero')
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import (
FluxTransformer2DModel,
)
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
torch_device,
)
enable_full_determinism()
@require_torch_accelerator
class FluxTransformer2DModelSingleFileTests(unittest.TestCase):
model_class = FluxTransformer2DModel
ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors"
alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"]
repo_id = "black-forest-labs/FLUX.1-dev"
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components(self):
model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer")
model_single_file = self.model_class.from_single_file(self.ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_checkpoint_loading(self):
for ckpt_path in self.alternate_keys_ckpt_paths:
backend_empty_cache(torch_device)
model = self.model_class.from_single_file(ckpt_path)
del model
gc.collect()
backend_empty_cache(torch_device)
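# Loading sketch: the URL is the checkpoint used by the test above and resolves to several
# GB of weights, so treat this as illustrative rather than something to run casually.
import torch
from diffusers import FluxTransformer2DModel
transformer = FluxTransformer2DModel.from_single_file(
    "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors",
    torch_dtype=torch.bfloat16,
)
print(transformer.dtype)  # torch.bfloat16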
|
from enum import Enum
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
msg = "You must pass a `byte_store` parameter."
raise ValueError(msg)
values["docstore"] = docstore
return values
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query,
**self.search_kwargs,
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query,
**self.search_kwargs,
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query,
**self.search_kwargs,
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query,
**self.search_kwargs,
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query,
**self.search_kwargs,
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
|
from enum import Enum
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
msg = "You must pass a `byte_store` parameter."
raise Exception(msg)
values["docstore"] = docstore
return values
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query,
**self.search_kwargs,
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query,
**self.search_kwargs,
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query,
**self.search_kwargs,
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query,
**self.search_kwargs,
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query,
**self.search_kwargs,
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
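# Usage sketch (the in-memory store, vector store and fake embedding names follow recent
# langchain-core releases and are assumptions rather than part of the module above):
# small chunks go into the vector store, full documents into the byte-backed docstore
# keyed by `doc_id`.
from langchain_core.documents import Document
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.stores import InMemoryByteStore
from langchain_core.vectorstores import InMemoryVectorStore
vectorstore = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=32))
retriever = MultiVectorRetriever(
    vectorstore=vectorstore, byte_store=InMemoryByteStore(), id_key="doc_id"
)
retriever.docstore.mset([("parent-1", Document(page_content="the full parent document"))])
vectorstore.add_documents(
    [Document(page_content="a small chunk of the parent", metadata={"doc_id": "parent-1"})]
)
print(retriever.invoke("chunk"))  # returns the parent document, not the chunk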
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayWeaviate` object"""
super().__del__()
if (
not self._persist
and len(_REGISTRY[self.__class__.__name__][self._class_name]) == 1
):
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
_REGISTRY[self.__class__.__name__][self._class_name].remove(self)
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
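# A hedged usage sketch, assuming a reachable Weaviate instance and a docarray
# version where `DocumentArray(storage='weaviate')` mixes in the class above;
# the config keys shown are illustrative, not a complete configuration.
# da = DocumentArray(storage='weaviate', config={'name': 'Example'})
# da.extend([Document(text='hello'), Document(text='world')])  # batched through _extend()
# assert len(da) == 2          # __len__ aggregates the meta count on the server
# assert da[0].id in da        # __contains__ checks existence by mapped object id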
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import librosa
import pytest
from executor.vggish import vggish_input
from jina import Document, DocumentArray, Flow
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sample_rate = librosa.load(
os.path.join(cur_dir, '../test_data/sample.wav')
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import librosa
import pytest
from jina import Document, DocumentArray, Flow
from ...vggish import vggish_input
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sample_rate = librosa.load(
os.path.join(cur_dir, '../test_data/sample.wav')
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
import warnings
from typing import Optional, Union, TYPE_CHECKING, Callable
import numpy as np
from docarray.score import NamedScore
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class EvaluationMixin:
"""A mixin that provides ranking evaluation functionality to DocumentArrayLike objects"""
def evaluate(
self,
other: 'DocumentArray',
metric: Union[str, Callable[..., float]],
hash_fn: Optional[Callable[['Document'], str]] = None,
metric_name: Optional[str] = None,
strict: bool = True,
**kwargs,
) -> Optional[float]:
"""Compute ranking evaluation metrics for a given `DocumentArray` when compared with a groundtruth.
This implementation expects the caller to provide a groundtruth `DocumentArray` that is structurally identical to `self`. It is based
on comparing the `matches` of the Documents inside the `DocumentArray`.
This method will fill the `evaluations` field of Documents inside this `DocumentArray` and will return the average of the computations
:param other: The groundtruth `DocumentArray` that this `DocumentArray` is compared to.
:param metric: The name of the metric, or multiple metrics to be computed
:param hash_fn: The function used for identifying the uniqueness of Documents. If not given, then ``Document.id`` is used.
:param metric_name: If provided, the results of the metrics computation will be stored in the `evaluations` field of each Document. If not provided, the name will be computed based on the metrics name.
:param strict: If set, the left and right sides are required to be fully aligned: same length and matching Documents at each position. This prevents
you from accidentally evaluating on irrelevant matches.
:param kwargs: Additional keyword arguments to be passed to `metric_fn`
:return: The average evaluation computed or a list of them if multiple metrics are required
"""
if strict:
self._check_length(len(other))
if hash_fn is None:
hash_fn = lambda d: d.id
if callable(metric):
metric_fn = metric
elif isinstance(metric, str):
from docarray.math import evaluation
metric_fn = getattr(evaluation, metric)
metric_name = metric_name or metric_fn.__name__
results = []
caller_max_rel = kwargs.pop('max_rel', None)
for d, gd in zip(self, other):
max_rel = caller_max_rel or len(gd.matches)
if strict and hash_fn(d) != hash_fn(gd):
raise ValueError(
f'Document {d} from the left-hand side and '
f'{gd} from the right-hand are not hashed to the same value. '
f'This means your left and right DocumentArray may not be aligned; or it means your '
f'`hash_fn` is badly designed.'
)
if not d.matches or not gd.matches:
raise ValueError(
f'Document {d!r} or {gd!r} has no matches, please check your Document'
)
targets = gd.matches[:max_rel]
desired = {hash_fn(m) for m in targets}
if len(desired) != len(targets):
warnings.warn(
f'{hash_fn!r} may not be valid, as it maps multiple Documents into the same hash. '
f'Evaluation results may be affected'
)
binary_relevance = [1 if hash_fn(m) in desired else 0 for m in d.matches]
r = metric_fn(binary_relevance, max_rel=max_rel, **kwargs)
d.evaluations[metric_name] = NamedScore(
value=r, op_name=str(metric_fn), ref_id=d.id
)
results.append(r)
if results:
return float(np.mean(results))
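# A hedged usage sketch, assuming a docarray version where DocumentArray carries
# this mixin; the ids and the `precision_at_k` metric name are illustrative.
# da = DocumentArray([Document(id='q1', matches=[Document(id='m1'), Document(id='m2')])])
# gt = DocumentArray([Document(id='q1', matches=[Document(id='m1')])])
# da.evaluate(gt, metric='precision_at_k', k=1)  # -> 1.0, also stored in da[0].evaluations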
|
import warnings
from typing import Optional, Union, TYPE_CHECKING, Callable
import numpy as np
from ...score import NamedScore
if TYPE_CHECKING:
from ... import Document, DocumentArray
class EvaluationMixin:
"""A mixin that provides ranking evaluation functionality to DocumentArrayLike objects"""
def evaluate(
self,
other: 'DocumentArray',
metric: Union[str, Callable[..., float]],
hash_fn: Optional[Callable[['Document'], str]] = None,
metric_name: Optional[str] = None,
strict: bool = True,
**kwargs,
) -> Optional[float]:
"""Compute ranking evaluation metrics for a given `DocumentArray` when compared with a groundtruth.
This implementation expects the caller to provide a groundtruth `DocumentArray` that is structurally identical to `self`. It is based
on comparing the `matches` of the Documents inside the `DocumentArray`.
This method will fill the `evaluations` field of Documents inside this `DocumentArray` and will return the average of the computations
:param other: The groundtruth `DocumentArray` that this `DocumentArray` is compared to.
:param metric: The name of the metric, or multiple metrics to be computed
:param hash_fn: The function used for identifying the uniqueness of Documents. If not given, then ``Document.id`` is used.
:param metric_name: If provided, the results of the metrics computation will be stored in the `evaluations` field of each Document. If not provided, the name will be computed based on the metrics name.
:param strict: If set, the left and right sides are required to be fully aligned: same length and matching Documents at each position. This prevents
you from accidentally evaluating on irrelevant matches.
:param kwargs: Additional keyword arguments to be passed to `metric_fn`
:return: The average evaluation computed or a list of them if multiple metrics are required
"""
if strict:
self._check_length(len(other))
if hash_fn is None:
hash_fn = lambda d: d.id
if callable(metric):
metric_fn = metric
elif isinstance(metric, str):
from ...math import evaluation
metric_fn = getattr(evaluation, metric)
metric_name = metric_name or metric_fn.__name__
results = []
caller_max_rel = kwargs.pop('max_rel', None)
for d, gd in zip(self, other):
max_rel = caller_max_rel or len(gd.matches)
if strict and hash_fn(d) != hash_fn(gd):
raise ValueError(
f'Document {d} from the left-hand side and '
f'{gd} from the right-hand are not hashed to the same value. '
f'This means your left and right DocumentArray may not be aligned; or it means your '
f'`hash_fn` is badly designed.'
)
if not d.matches or not gd.matches:
raise ValueError(
f'Document {d!r} or {gd!r} has no matches, please check your Document'
)
targets = gd.matches[:max_rel]
desired = {hash_fn(m) for m in targets}
if len(desired) != len(targets):
warnings.warn(
f'{hash_fn!r} may not be valid, as it maps multiple Documents into the same hash. '
f'Evaluation results may be affected'
)
binary_relevance = [1 if hash_fn(m) in desired else 0 for m in d.matches]
r = metric_fn(binary_relevance, max_rel=max_rel, **kwargs)
d.evaluations[metric_name] = NamedScore(
value=r, op_name=str(metric_fn), ref_id=d.id
)
results.append(r)
if results:
return float(np.mean(results))
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[proto]"',
'lz4': '"docarray[proto]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using the magic command
`get_ipython` that is only available in Jupyter.
:return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
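# A minimal usage sketch: soft-import an optional dependency and degrade
# gracefully when it is missing (`pandas` is just an example package).
_pandas = import_library('pandas', raise_error=False)
print('pandas available:', _pandas is not None)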
|
import importlib
import os
import re
import types
from typing import Any, Optional
import numpy as np
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
torch_imported = True
try:
import tensorflow as tf # type: ignore # noqa: F401
except (ImportError, TypeError):
tf_imported = False
else:
tf_imported = True
INSTALL_INSTRUCTIONS = {
'google.protobuf': '"docarray[common]"',
'lz4': '"docarray[common]"',
'pandas': '"docarray[pandas]"',
'PIL': '"docarray[image]"',
'pydub': '"docarray[audio]"',
'av': '"docarray[video]"',
'trimesh': '"docarray[mesh]"',
'hnswlib': '"docarray[hnswlib]"',
'elasticsearch': '"docarray[elasticsearch]"',
'weaviate': '"docarray[weaviate]"',
'qdrant_client': '"docarray[qdrant]"',
'fastapi': '"docarray[web]"',
'torch': '"docarray[torch]"',
'tensorflow': 'protobuf==3.19.0 tensorflow',
'hubble': '"docarray[jac]"',
'smart_open': '"docarray[aws]"',
'boto3': '"docarray[aws]"',
'botocore': '"docarray[aws]"',
}
def import_library(
package: str, raise_error: bool = True
) -> Optional[types.ModuleType]:
lib: Optional[types.ModuleType]
try:
lib = importlib.import_module(package)
except (ModuleNotFoundError, ImportError):
lib = None
if lib is None and raise_error:
raise ImportError(
f'The following required library is not installed: {package} \n'
f'To install all necessary libraries, run: `pip install {INSTALL_INSTRUCTIONS[package]}`.'
)
else:
return lib
def _get_path_from_docarray_root_level(file_path: str) -> str:
path = os.path.dirname(file_path)
rel_path = re.sub('(?s:.*)docarray', 'docarray', path).replace('/', '.')
return rel_path
def is_torch_available():
return torch_imported
def is_tf_available():
return tf_imported
def is_np_int(item: Any) -> bool:
dtype = getattr(item, 'dtype', None)
ndim = getattr(item, 'ndim', None)
if dtype is not None and ndim is not None:
try:
return ndim == 0 and np.issubdtype(dtype, np.integer)
except TypeError:
return False
return False # this is unreachable, but mypy wants it
def is_notebook() -> bool:
"""
Check if we're running in a Jupyter notebook, using the magic command
`get_ipython` that is only available in Jupyter.
:return: True if running in a Jupyter notebook, else False.
"""
try:
shell = get_ipython().__class__.__name__ # type: ignore
except NameError:
return False
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'Shell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# for type checking and IDE support, we include the imports here
# but we don't want to eagerly import them at runtime
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
]
_dynamic_imports = {
"image": "__module__",
"abatch_iterate": "aiter",
"get_from_dict_or_env": "env",
"get_from_env": "env",
"StrictFormatter": "formatting",
"formatter": "formatting",
"get_bolded_text": "input",
"get_color_mapping": "input",
"get_colored_text": "input",
"print_text": "input",
"batch_iterate": "iter",
"try_load_from_hub": "loading",
"pre_init": "pydantic",
"comma_list": "strings",
"stringify_dict": "strings",
"stringify_value": "strings",
"build_extra_kwargs": "utils",
"check_package_version": "utils",
"convert_to_secret_str": "utils",
"from_env": "utils",
"get_pydantic_field_names": "utils",
"guard_import": "utils",
"mock_now": "utils",
"secret_from_env": "utils",
"xor_args": "utils",
"raise_for_status_with_text": "utils",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
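# A hedged illustration of the lazy-import scheme above: the first attribute
# access triggers the submodule import and caches the symbol in `globals()`,
# so later lookups never reach `__getattr__` again.
# from langchain_core import utils
# utils.comma_list(["a", "b", "c"])  # first access imports .strings -> "a, b, c"
# utils.comma_list(["x", "y"])       # second access is served from the cached global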
|
"""**Utility functions** for LangChain.
These functions do not depend on any other LangChain module.
"""
from langchain_core.utils import image
from langchain_core.utils.aiter import abatch_iterate
from langchain_core.utils.env import get_from_dict_or_env, get_from_env
from langchain_core.utils.formatting import StrictFormatter, formatter
from langchain_core.utils.input import (
get_bolded_text,
get_color_mapping,
get_colored_text,
print_text,
)
from langchain_core.utils.iter import batch_iterate
from langchain_core.utils.loading import try_load_from_hub
from langchain_core.utils.pydantic import pre_init
from langchain_core.utils.strings import comma_list, stringify_dict, stringify_value
from langchain_core.utils.utils import (
build_extra_kwargs,
check_package_version,
convert_to_secret_str,
from_env,
get_pydantic_field_names,
guard_import,
mock_now,
raise_for_status_with_text,
secret_from_env,
xor_args,
)
__all__ = [
"build_extra_kwargs",
"StrictFormatter",
"check_package_version",
"convert_to_secret_str",
"formatter",
"get_bolded_text",
"get_color_mapping",
"get_colored_text",
"get_pydantic_field_names",
"guard_import",
"mock_now",
"print_text",
"raise_for_status_with_text",
"xor_args",
"try_load_from_hub",
"image",
"get_from_env",
"get_from_dict_or_env",
"stringify_dict",
"comma_list",
"stringify_value",
"pre_init",
"batch_iterate",
"abatch_iterate",
"from_env",
"secret_from_env",
]
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with GatewayRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser, set_pod_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
def _create_worker_runtime(port, name='', executor=None):
args = set_pod_parser().parse_args([])
args.port = port
args.name = name
if executor:
args.uses = executor
with WorkerRuntime(args) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
if protocol == 'http':
gateway_runtime = HTTPGatewayRuntime
elif protocol == 'websocket':
gateway_runtime = WebSocketGatewayRuntime
else:
gateway_runtime = GRPCGatewayRuntime
with gateway_runtime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
]
)
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
AsyncNewLoopRuntime.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions:'
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='ImageUrl')
IMAGE_FILE_FORMATS = ('png', 'jpeg', 'jpg')
@_register_proto(proto_type_name='image_url')
class ImageUrl(AnyUrl):
"""
URL to a .png, .jpeg, or .jpg file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
url = super().validate(value, field, config) # basic url validation
has_image_extension = any(url.endswith(ext) for ext in IMAGE_FILE_FORMATS)
if not has_image_extension:
raise ValueError(
f'Image URL must have one of the following extensions:'
f'{IMAGE_FILE_FORMATS}'
)
return cls(str(url), scheme=None)
def load(
self,
width: Optional[int] = None,
height: Optional[int] = None,
axis_layout: Tuple[str, str, str] = ('H', 'W', 'C'),
timeout: Optional[float] = None,
) -> np.ndarray:
"""
Load the data from the url into a numpy.ndarray image tensor
EXAMPLE USAGE
.. code-block:: python
from docarray import BaseDoc
from docarray.typing import ImageUrl
import numpy as np
class MyDoc(BaseDoc):
img_url: ImageUrl
doc = MyDoc(
img_url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg"
)
img_tensor = doc.img_url.load()
assert isinstance(img_tensor, np.ndarray)
img_tensor = doc.img_url.load(height=224, width=224)
assert img_tensor.shape == (224, 224, 3)
layout = ('C', 'W', 'H')
img_tensor = doc.img_url.load(height=100, width=200, axis_layout=layout)
assert img_tensor.shape == (3, 200, 100)
:param width: width of the image tensor.
:param height: height of the image tensor.
:param axis_layout: ordering of the different image axes.
'H' = height, 'W' = width, 'C' = color channel
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:return: np.ndarray representing the image as RGB values
"""
from docarray.typing.bytes.image_bytes import ImageBytes
buffer = ImageBytes(self.load_bytes(timeout=timeout))
return buffer.load(width, height, axis_layout)
def display(self) -> None:
"""
Display image data from url in notebook.
"""
if is_notebook():
from IPython.display import Image, display
remote_url = True if self.startswith('http') else False
if remote_url:
display(Image(url=self))
else:
display(Image(filename=self))
else:
warnings.warn('Display of image is only possible in a notebook.')
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
# inference on the val dataset and dump the proposals with the evaluation metric
# data_root = 'data/coco/'
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_val2017.pkl'),
# dict(
# type='CocoMetric',
# ann_file=data_root + 'annotations/instances_val2017.json',
# metric='proposal_fast',
# file_client_args={{_base_.file_client_args}},
# format_only=False)
# ]
# inference on the training dataset and dump the proposals without computing the evaluation metric
# data_root = 'data/coco/'
# test_dataloader = dict(
# dataset=dict(
# ann_file='annotations/instances_train2017.json',
# data_prefix=dict(img='train2017/')))
#
# test_evaluator = [
# dict(
# type='DumpProposals',
# output_dir=data_root + 'proposals/',
# proposals_file='rpn_r50_fpn_1x_train2017.pkl'),
# ]
|
_base_ = [
'../_base_/models/rpn_r50_fpn.py', '../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.integrations.providers import ProviderName
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal[ProviderName.GITHUB],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
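# A hedged usage sketch: a block would declare its credentials input roughly as
# below; `Block`, `BlockSchema` and the "repo" scope are illustrative only.
# class GithubExampleBlock(Block):
#     class Input(BlockSchema):
#         credentials: GithubCredentialsInput = GithubCredentialsField("repo")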
|
from typing import Literal
from pydantic import SecretStr
from backend.data.model import (
APIKeyCredentials,
CredentialsField,
CredentialsMetaInput,
OAuth2Credentials,
)
from backend.util.settings import Secrets
secrets = Secrets()
GITHUB_OAUTH_IS_CONFIGURED = bool(
secrets.github_client_id and secrets.github_client_secret
)
GithubCredentials = APIKeyCredentials | OAuth2Credentials
GithubCredentialsInput = CredentialsMetaInput[
Literal["github"],
Literal["api_key", "oauth2"] if GITHUB_OAUTH_IS_CONFIGURED else Literal["api_key"],
]
def GithubCredentialsField(scope: str) -> GithubCredentialsInput:
"""
Creates a GitHub credentials input on a block.
Params:
scope: The authorization scope needed for the block to work. ([list of available scopes](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes))
""" # noqa
return CredentialsField(
provider="github",
supported_credential_types=(
{"api_key", "oauth2"} if GITHUB_OAUTH_IS_CONFIGURED else {"api_key"}
),
required_scopes={scope},
description="The GitHub integration can be used with OAuth, "
"or any API key with sufficient permissions for the blocks it is used on.",
)
TEST_CREDENTIALS = APIKeyCredentials(
id="01234567-89ab-cdef-0123-456789abcdef",
provider="github",
api_key=SecretStr("mock-github-api-key"),
title="Mock GitHub API key",
expires_at=None,
)
TEST_CREDENTIALS_INPUT = {
"provider": TEST_CREDENTIALS.provider,
"id": TEST_CREDENTIALS.id,
"type": TEST_CREDENTIALS.type,
"title": TEST_CREDENTIALS.type,
}
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
if backend not in ("tensorflow", "jax", "torch", "numpy"):
raise ValueError(
"Available backends are ('tensorflow', 'jax', 'torch' and "
f"'numpy'). Received: backend={backend}"
)
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
module = importlib.import_module("keras.src.backend.tensorflow")
return getattr(module, name)
if self._backend == "jax":
module = importlib.import_module("keras.src.backend.jax")
return getattr(module, name)
if self._backend == "torch":
module = importlib.import_module("keras.src.backend.torch")
return getattr(module, name)
if self._backend == "numpy":
if backend_module.backend() == "numpy":
return getattr(backend_module, name)
else:
raise NotImplementedError(
"Currently, we cannot dynamically import the numpy backend "
"because it would disrupt the namespace of the import."
)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
from keras.src.backend import tensorflow as tf_backend
return getattr(tf_backend, name)
if self._backend == "jax":
from keras.src.backend import jax as jax_backend
return getattr(jax_backend, name)
if self._backend == "torch":
from keras.src.backend import torch as torch_backend
return getattr(torch_backend, name)
if self._backend == "numpy":
# TODO (ariG23498):
# The import `from keras.src.backend import numpy as numpy_backend`
# is not working. This is a temporary fix.
# The import is redirected to `keras.backend.numpy.numpy.py`
from keras.src import backend as numpy_backend
return getattr(numpy_backend, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import logging
import typing
from autogpt_libs.auth import requires_admin_user
from autogpt_libs.auth.depends import get_user_id
from fastapi import APIRouter, Body, Depends
from prisma import Json
from prisma.enums import CreditTransactionType
from backend.data.credit import admin_get_user_history, get_user_credit_model
from backend.server.v2.admin.model import AddUserCreditsResponse, UserHistoryResponse
logger = logging.getLogger(__name__)
_user_credit_model = get_user_credit_model()
router = APIRouter(
prefix="/admin",
tags=["credits", "admin"],
dependencies=[Depends(requires_admin_user)],
)
@router.post(
"/add_credits", response_model=AddUserCreditsResponse, summary="Add Credits to User"
)
async def add_user_credits(
user_id: typing.Annotated[str, Body()],
amount: typing.Annotated[int, Body()],
comments: typing.Annotated[str, Body()],
admin_user: typing.Annotated[
str,
Depends(get_user_id),
],
):
""" """
logger.info(f"Admin user {admin_user} is adding {amount} credits to user {user_id}")
new_balance, transaction_key = await _user_credit_model._add_transaction(
user_id,
amount,
transaction_type=CreditTransactionType.GRANT,
metadata=Json({"admin_id": admin_user, "reason": comments}),
)
return {
"new_balance": new_balance,
"transaction_key": transaction_key,
}
@router.get(
"/users_history",
response_model=UserHistoryResponse,
summary="Get All Users History",
)
async def admin_get_all_user_history(
admin_user: typing.Annotated[
str,
Depends(get_user_id),
],
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
transaction_filter: typing.Optional[CreditTransactionType] = None,
):
""" """
logger.info(f"Admin user {admin_user} is getting grant history")
try:
resp = await admin_get_user_history(
page=page,
page_size=page_size,
search=search,
transaction_filter=transaction_filter,
)
logger.info(f"Admin user {admin_user} got {len(resp.history)} grant history")
return resp
except Exception as e:
logger.exception(f"Error getting grant history: {e}")
raise e
|
import logging
import typing
from autogpt_libs.auth import requires_admin_user
from autogpt_libs.auth.depends import get_user_id
from fastapi import APIRouter, Body, Depends
from prisma import Json
from prisma.enums import CreditTransactionType
from backend.data.credit import admin_get_user_history, get_user_credit_model
from backend.server.v2.admin.model import AddUserCreditsResponse, UserHistoryResponse
logger = logging.getLogger(__name__)
_user_credit_model = get_user_credit_model()
router = APIRouter(
prefix="/admin",
tags=["credits", "admin"],
dependencies=[Depends(requires_admin_user)],
)
@router.post("/add_credits", response_model=AddUserCreditsResponse)
async def add_user_credits(
user_id: typing.Annotated[str, Body()],
amount: typing.Annotated[int, Body()],
comments: typing.Annotated[str, Body()],
admin_user: typing.Annotated[
str,
Depends(get_user_id),
],
):
""" """
logger.info(f"Admin user {admin_user} is adding {amount} credits to user {user_id}")
new_balance, transaction_key = await _user_credit_model._add_transaction(
user_id,
amount,
transaction_type=CreditTransactionType.GRANT,
metadata=Json({"admin_id": admin_user, "reason": comments}),
)
return {
"new_balance": new_balance,
"transaction_key": transaction_key,
}
@router.get(
"/users_history",
response_model=UserHistoryResponse,
)
async def admin_get_all_user_history(
admin_user: typing.Annotated[
str,
Depends(get_user_id),
],
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
transaction_filter: typing.Optional[CreditTransactionType] = None,
):
""" """
logger.info(f"Admin user {admin_user} is getting grant history")
try:
resp = await admin_get_user_history(
page=page,
page_size=page_size,
search=search,
transaction_filter=transaction_filter,
)
logger.info(f"Admin user {admin_user} got {len(resp.history)} grant history")
return resp
except Exception as e:
logger.exception(f"Error getting grant history: {e}")
raise e
|
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size": 1,
"max_input_length": 61,
"num_symbols": model_config["num_symbols"],
"input_dim": model_config["input_dim"],
"right_context_length": model_config["right_context_length"],
"segment_length": model_config["segment_length"],
}
def _get_model_config(self):
return {
"input_dim": 80,
"encoding_dim": 128,
"num_symbols": 256,
"segment_length": 16,
"right_context_length": 4,
"time_reduction_input_dim": 128,
"time_reduction_stride": 4,
"transformer_num_heads": 4,
"transformer_ffn_dim": 64,
"transformer_num_layers": 3,
"transformer_dropout": 0.0,
"transformer_activation": "relu",
"transformer_left_context_length": 30,
"transformer_max_memory_size": 0,
"transformer_weight_init_scale_strategy": "depthwise",
"transformer_tanh_on_mem": True,
"symbol_embedding_dim": 64,
"num_lstm_layers": 2,
"lstm_layer_norm": True,
"lstm_layer_norm_epsilon": 1e-3,
"lstm_dropout": 0.0,
}
def _get_model(self):
return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval()
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""
input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
lengths = torch.randint(1, max_input_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
model = self._get_model()
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search(input, lengths, beam_width)
scripted_res = scripted(input, lengths, beam_width)
self.assertEqual(res, scripted_res)
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""
input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(segment_length + right_context_length, input_dim).to(device=self.device, dtype=self.dtype)
lengths = torch.randint(1, segment_length + right_context_length + 1, ()).to(
device=self.device, dtype=torch.int32
)
model = self._get_model()
state, hypo = None, None
scripted_state, scripted_hypo = None, None
for _ in range(2):
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search.infer(input, lengths, beam_width, state=state, hypothesis=hypo)
scripted_res = scripted.infer(input, lengths, beam_width, state=scripted_state, hypothesis=scripted_hypo)
self.assertEqual(res, scripted_res)
state = res[1]
hypo = res[0]
scripted_state = scripted_res[1]
scripted_hypo = scripted_res[0]
|
import torch
from torchaudio.models import emformer_rnnt_model, RNNTBeamSearch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class RNNTBeamSearchTestImpl(TestBaseMixin):
def _get_input_config(self):
model_config = self._get_model_config()
return {
"batch_size": 1,
"max_input_length": 61,
"num_symbols": model_config["num_symbols"],
"input_dim": model_config["input_dim"],
"right_context_length": model_config["right_context_length"],
"segment_length": model_config["segment_length"],
}
def _get_model_config(self):
return {
"input_dim": 80,
"encoding_dim": 128,
"num_symbols": 256,
"segment_length": 16,
"right_context_length": 4,
"time_reduction_input_dim": 128,
"time_reduction_stride": 4,
"transformer_num_heads": 4,
"transformer_ffn_dim": 64,
"transformer_num_layers": 3,
"transformer_dropout": 0.0,
"transformer_activation": "relu",
"transformer_left_context_length": 30,
"transformer_max_memory_size": 0,
"transformer_weight_init_scale_strategy": "depthwise",
"transformer_tanh_on_mem": True,
"symbol_embedding_dim": 64,
"num_lstm_layers": 2,
"lstm_layer_norm": True,
"lstm_layer_norm_epsilon": 1e-3,
"lstm_dropout": 0.0,
}
def _get_model(self):
return emformer_rnnt_model(**self._get_model_config()).to(device=self.device, dtype=self.dtype).eval()
def test_torchscript_consistency_forward(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `forward`."""
input_config = self._get_input_config()
batch_size = input_config["batch_size"]
max_input_length = input_config["max_input_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(batch_size, max_input_length + right_context_length, input_dim).to(
device=self.device, dtype=self.dtype
)
lengths = torch.randint(1, max_input_length + 1, (batch_size,)).to(device=self.device, dtype=torch.int32)
model = self._get_model()
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search(input, lengths, beam_width)
scripted_res = scripted(input, lengths, beam_width)
self.assertEqual(res, scripted_res)
def test_torchscript_consistency_infer(self):
r"""Verify that scripting RNNTBeamSearch does not change the behavior of method `infer`."""
input_config = self._get_input_config()
segment_length = input_config["segment_length"]
right_context_length = input_config["right_context_length"]
input_dim = input_config["input_dim"]
num_symbols = input_config["num_symbols"]
blank_idx = num_symbols - 1
beam_width = 5
input = torch.rand(segment_length + right_context_length, input_dim).to(device=self.device, dtype=self.dtype)
lengths = torch.randint(1, segment_length + right_context_length + 1, ()).to(
device=self.device, dtype=torch.int32
)
model = self._get_model()
state, hypo = None, None
scripted_state, scripted_hypo = None, None
for _ in range(2):
beam_search = RNNTBeamSearch(model, blank_idx)
scripted = torch_script(beam_search)
res = beam_search.infer(input, lengths, beam_width, state=state, hypothesis=hypo)
scripted_res = scripted.infer(input, lengths, beam_width, state=scripted_state, hypothesis=scripted_hypo)
self.assertEqual(res, scripted_res)
state = res[1]
hypo = res[0][0]
scripted_state = scripted_res[1]
scripted_hypo = scripted_res[0][0]
|
"""Standard LangChain interface tests."""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
@pytest.mark.benchmark
def test_init_time_with_client(benchmark: BenchmarkFixture) -> None:
"""Test initialization time, accounting for lazy loading of client."""
def _init_in_loop_with_clients() -> None:
for _ in range(10):
llm = ChatAnthropic(model="claude-3-5-haiku-latest")
_ = llm._client
_ = llm._async_client
benchmark(_init_in_loop_with_clients)
|
"""Standard LangChain interface tests"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ChatModelUnitTests
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from langchain_anthropic import ChatAnthropic
class TestAnthropicStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": "claude-3-haiku-20240307"}
@pytest.mark.benchmark
def test_init_time_with_client(benchmark: BenchmarkFixture) -> None:
"""Test initialization time, accounting for lazy loading of client."""
def _init_in_loop_with_clients() -> None:
for _ in range(10):
llm = ChatAnthropic(model="claude-3-5-haiku-latest")
_ = llm._client
_ = llm._async_client
benchmark(_init_in_loop_with_clients)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.40.2'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
from docarray.utils._internal.pydantic import is_pydantic_v2
def unpickle_doclist(doc_type, b):
return DocList[doc_type].from_bytes(b, protocol="protobuf")
def unpickle_docvec(doc_type, tensor_type, b):
return DocVec[doc_type].from_bytes(b, protocol="protobuf", tensor_type=tensor_type)
if is_pydantic_v2:
# Register the pickle functions
def register_serializers():
import copyreg
from functools import partial
unpickle_doc_fn = partial(BaseDoc.from_bytes, protocol="protobuf")
def pickle_doc(doc):
b = doc.to_bytes(protocol='protobuf')
return unpickle_doc_fn, (doc.__class__, b)
# Register BaseDoc serialization
copyreg.pickle(BaseDoc, pickle_doc)
# For DocList, we need to hook into __reduce__ since it's a generic
def pickle_doclist(doc_list):
b = doc_list.to_bytes(protocol='protobuf')
doc_type = doc_list.doc_type
return unpickle_doclist, (doc_type, b)
# Replace DocList.__reduce__ with a method that returns the correct format
def doclist_reduce(self):
return pickle_doclist(self)
DocList.__reduce__ = doclist_reduce
# For DocVec, we need to hook into __reduce__ since it's a generic
def pickle_docvec(doc_vec):
b = doc_vec.to_bytes(protocol='protobuf')
doc_type = doc_vec.doc_type
tensor_type = doc_vec.tensor_type
return unpickle_docvec, (doc_type, tensor_type, b)
# Replace DocList.__reduce__ with a method that returns the correct format
def docvec_reduce(self):
return pickle_docvec(self)
DocVec.__reduce__ = docvec_reduce
register_serializers()
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
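# Hedged illustration (not part of docarray itself): a minimal sketch of what the
# __reduce__ hooks above enable under pydantic v2, namely plain `pickle` round-trips
# of a DocList through the protobuf serializers. `_ExampleDoc` and the function below
# are hypothetical and exist only for this sketch.
class _ExampleDoc(BaseDoc):
    text: str = ''
def _pickle_roundtrip_sketch():
    import pickle
    docs = DocList[_ExampleDoc]([_ExampleDoc(text='hello'), _ExampleDoc(text='world')])
    # pickling dispatches to doclist_reduce -> unpickle_doclist defined above
    restored = pickle.loads(pickle.dumps(docs))
    assert [d.text for d in restored] == ['hello', 'world']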
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.40.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
from docarray.utils._internal.pydantic import is_pydantic_v2
def unpickle_doclist(doc_type, b):
return DocList[doc_type].from_bytes(b, protocol="protobuf")
def unpickle_docvec(doc_type, tensor_type, b):
return DocVec[doc_type].from_bytes(b, protocol="protobuf", tensor_type=tensor_type)
if is_pydantic_v2:
# Register the pickle functions
def register_serializers():
import copyreg
from functools import partial
unpickle_doc_fn = partial(BaseDoc.from_bytes, protocol="protobuf")
def pickle_doc(doc):
b = doc.to_bytes(protocol='protobuf')
return unpickle_doc_fn, (doc.__class__, b)
# Register BaseDoc serialization
copyreg.pickle(BaseDoc, pickle_doc)
# For DocList, we need to hook into __reduce__ since it's a generic
def pickle_doclist(doc_list):
b = doc_list.to_bytes(protocol='protobuf')
doc_type = doc_list.doc_type
return unpickle_doclist, (doc_type, b)
# Replace DocList.__reduce__ with a method that returns the correct format
def doclist_reduce(self):
return pickle_doclist(self)
DocList.__reduce__ = doclist_reduce
# For DocVec, we need to hook into __reduce__ since it's a generic
def pickle_docvec(doc_vec):
b = doc_vec.to_bytes(protocol='protobuf')
doc_type = doc_vec.doc_type
tensor_type = doc_vec.tensor_type
return unpickle_docvec, (doc_type, tensor_type, b)
# Replace DocList.__reduce__ with a method that returns the correct format
def docvec_reduce(self):
return pickle_docvec(self)
DocVec.__reduce__ = docvec_reduce
register_serializers()
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../../examples/sentence_transformer/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"SiameseDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"SiameseDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from langchain_anthropic import __all__
EXPECTED_ALL = [
"ChatAnthropicMessages",
"ChatAnthropic",
"convert_to_anthropic_tool",
"Anthropic",
"AnthropicLLM",
]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
|
from langchain_anthropic import __all__
EXPECTED_ALL = ["ChatAnthropicMessages", "ChatAnthropic", "Anthropic", "AnthropicLLM"]
def test_all_imports() -> None:
assert sorted(EXPECTED_ALL) == sorted(__all__)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
_EMBEDDING_DIM = 768
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
__copyright__ = 'Copyright (c) 2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import subprocess
from typing import Callable, List
import pytest
from jina import DocumentArray, Flow
from ...transform_encoder import TransformerTorchEncoder
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(data_generator: Callable, request_size: int):
with Flow(return_results=True).add(uses=TransformerTorchEncoder) as flow:
resp = flow.post(
on='/index',
inputs=data_generator(),
request_size=request_size,
return_results=True,
)
assert min(len(resp) * request_size, 50) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
def filter_none(elements):
return list(filter(lambda e: e is not None, elements))
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(pytest.lazy_fixture('docs_with_text'), [['r', 10], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_text'),
[['r', 0], ['c', 10], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_text'),
[['r', 0], ['c', 0], ['cc', 10]],
'cc',
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
assert (
len(
filter_none(
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
)
== count
)
return validate
flow = Flow(return_results=True).add(uses=TransformerTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_path]},
return_results=True,
)
validate_traversal(docs_per_path)(resp)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, data_batch, return_loss=False):
inputs, labels = [], []
for x in data_batch:
inputs.append(x['inputs'])
labels.append(x['data_sample'])
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
inputs = torch.stack(inputs).to(device)
labels = torch.stack(labels).to(device)
outputs = self.linear(inputs)
if return_loss:
loss = (labels - outputs).sum()
outputs = dict(loss=loss, log_vars=dict(loss=loss.item()))
return outputs
else:
outputs = dict(log_vars=dict(a=1, b=0.5))
return outputs
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
val_cfg=dict(),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(model),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from unittest import TestCase
from unittest.mock import Mock
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from mmengine.hooks import EMAHook
from mmengine.model import ExponentialMovingAverage
from mmengine.optim import OptimWrapper
from mmengine.registry import DATASETS, MODEL_WRAPPERS
from mmengine.runner import Runner
class ToyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, data_batch, return_loss=False):
inputs, labels = [], []
for x in data_batch:
inputs.append(x['inputs'])
labels.append(x['data_sample'])
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
inputs = torch.stack(inputs).to(device)
labels = torch.stack(labels).to(device)
outputs = self.linear(inputs)
if return_loss:
loss = (labels - outputs).sum()
outputs = dict(loss=loss, log_vars=dict(loss=loss.item()))
return outputs
else:
outputs = dict(log_vars=dict(a=1, b=0.5))
return outputs
@DATASETS.register_module()
class DummyDataset(Dataset):
METAINFO = dict() # type: ignore
data = torch.randn(12, 2)
label = torch.ones(12)
def __len__(self):
return self.data.size(0)
def __getitem__(self, index):
return dict(inputs=self.data[index], data_sample=self.label[index])
class TestEMAHook(TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_ema_hook(self):
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = ToyModel().to(device)
evaluator = Mock()
evaluator.evaluate = Mock(return_value=dict(acc=0.5))
runner = Runner(
model=model,
train_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
val_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=False),
batch_size=3,
num_workers=0),
val_evaluator=evaluator,
work_dir=self.temp_dir.name,
optim_wrapper=OptimWrapper(
torch.optim.Adam(ToyModel().parameters())),
train_cfg=dict(by_epoch=True, max_epochs=2),
val_cfg=dict(interval=1),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook', )],
experiment_name='test1')
runner.train()
for hook in runner.hooks:
if isinstance(hook, EMAHook):
self.assertTrue(
isinstance(hook.ema_model, ExponentialMovingAverage))
self.assertTrue(
osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)
# load and testing
runner = Runner(
model=model,
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test2')
runner.test()
@MODEL_WRAPPERS.register_module()
class DummyWrapper(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
# with model wrapper
runner = Runner(
model=DummyWrapper(model),
test_dataloader=dict(
dataset=dict(type='DummyDataset'),
sampler=dict(type='DefaultSampler', shuffle=True),
batch_size=3,
num_workers=0),
test_evaluator=evaluator,
test_cfg=dict(),
work_dir=self.temp_dir.name,
load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
default_hooks=dict(logger=None),
custom_hooks=[dict(type='EMAHook')],
experiment_name='test3')
runner.test()
|
from langchain_huggingface.chat_models import (
ChatHuggingFace, # type: ignore[import-not-found]
)
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
from langchain_huggingface.llms import (
HuggingFaceEndpoint,
HuggingFacePipeline,
)
__all__ = [
"ChatHuggingFace",
"HuggingFaceEmbeddings",
"HuggingFaceEndpoint",
"HuggingFaceEndpointEmbeddings",
"HuggingFacePipeline",
]
|
from langchain_huggingface.chat_models import (
ChatHuggingFace, # type: ignore[import-not-found]
)
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
from langchain_huggingface.llms import (
HuggingFaceEndpoint,
HuggingFacePipeline,
)
__all__ = [
"ChatHuggingFace",
"HuggingFaceEndpointEmbeddings",
"HuggingFaceEmbeddings",
"HuggingFaceEndpoint",
"HuggingFacePipeline",
]
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply("y1", 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPALTE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
"""
Prompts for implementing Chain of Abstraction.
While official prompts are not given (and the paper finetunes models for the task),
we can take inspiration and use few-shot prompting to generate a prompt for implementing
chain of abstraction in an LLM agent.
"""
REASONING_PROMPT_TEMPALTE = """Generate an abstract plan of reasoning using placeholders for the specific values and function calls needed.
The placeholders should be labeled y1, y2, etc.
Function calls should be represented as inline strings like [FUNC {{function_name}}({{input1}}, {{input2}}, ...) = {{output_placeholder}}].
Assume someone will read the plan after the functions have been executed in order to make a final response.
Not every question will require function calls to answer.
If you do invoke a function, only use the available functions, do not make up functions.
Example:
-----------
Available functions:
```python
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.\"\"\"
...
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.\"\"\"
...
```
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Abstract plan of reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = y1] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(y1, 3) = y2] apples.
Your Turn:
-----------
Available functions:
```python
{functions}
```
Question:
{question}
Abstract plan of reasoning:
"""
REFINE_REASONING_PROMPT_TEMPALTE = """Generate a response to a question by using a previous abstract plan of reasoning. Use the previous reasoning as context to write a response to the question.
Example:
-----------
Question:
Sally has 3 apples and buys 2 more. Then magically, a wizard casts a spell that multiplies the number of apples by 3. How many apples does Sally have now?
Previous reasoning:
After buying the apples, Sally has [FUNC add(3, 2) = 5] apples. Then, the wizard casts a spell to multiply the number of apples by 3, resulting in [FUNC multiply(5, 3) = 15] apples.
Response:
After the wizard casts the spell, Sally has 15 apples.
Your Turn:
-----------
Question:
{question}
Previous reasoning:
{prev_reasoning}
Response:
"""
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
import transformers
from PIL import Image
from sentence_transformers.models.Asym import InputModule
class CLIPModel(InputModule):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.model.save_pretrained(output_path, safe_serialization=safe_serialization)
self.processor.save_pretrained(output_path)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
local_path = cls.load_dir_path(
model_name_or_path=model_name_or_path,
subfolder=subfolder,
token=token,
cache_folder=cache_folder,
revision=revision,
local_files_only=local_files_only,
)
return cls(local_path)
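# Hedged usage sketch (not part of sentence-transformers): runs one text and one image
# through the module above. The first call downloads openai/clip-vit-base-patch32;
# "example.jpg" is a placeholder path.
def _clip_module_example():
    clip = CLIPModel()
    image = Image.open("example.jpg")
    features = clip.tokenize(["a photo of a cat", image])
    features = clip(features)
    # one embedding per input, texts and images interleaved in the original order
    return features["sentence_embedding"].shape  # expected: torch.Size([2, 512])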
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
save_in_root: bool = True
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
@property
def max_seq_length(self) -> int:
return self.processor.tokenizer.model_max_length
@max_seq_length.setter
def max_seq_length(self, value: int) -> None:
self.processor.tokenizer.model_max_length = value
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, padding=padding, truncation=True, return_tensors="pt")
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
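# Hedged usage note (not part of the original script): a typical invocation looks like
#   python publish_model.py work_dirs/latest.pth published_model.pth
# which drops the optimizer state and renames the output to something like
# published_model-<first 8 sha256 hex chars>.pth (e.g. published_model-abcdef01.pth).
# The paths and hash shown here are illustrative only.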
|
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
    >>> softmax_layer = keras.layers.activations.Softmax()
    >>> input = np.array([1.0, 2.0, 1.0])
    >>> result = softmax_layer(input)
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.axis = axis
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
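# Hedged usage sketch (not part of keras): demonstrates the masking path described in
# the docstring above. Masked positions get a large negative logit added, so they end
# up with (near-)zero probability. The inputs below are illustrative only.
def _masked_softmax_example():
    import numpy as np
    logits = np.array([[1.0, 2.0, 3.0]])
    mask = np.array([[True, True, False]])  # mask out the last position
    probs = Softmax()(logits, mask=mask)
    # probs is approximately [[0.269, 0.731, 0.0]]
    return probs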
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
    >>> softmax_layer = keras.layers.activations.Softmax()
    >>> input = np.array([1.0, 2.0, 1.0])
    >>> result = softmax_layer(input)
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.7.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.14.6.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for Double Head RCNN.
https://arxiv.org/abs/1904.06493
"""
def __init__(self, reg_roi_scale_factor, **kwargs):
super(DoubleHeadRoIHead, self).__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing time."""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
|
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for Double Head RCNN.
https://arxiv.org/abs/1904.06493
"""
def __init__(self, reg_roi_scale_factor, **kwargs):
super(DoubleHeadRoIHead, self).__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing time."""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
|
from collections.abc import Sequence
from langchain_core.tools import BaseTool
def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
"""Validate tools for single input.
Args:
class_name: Name of the class.
tools: List of tools to validate.
Raises:
ValueError: If a multi-input tool is found in tools.
"""
for tool in tools:
if not tool.is_single_input:
raise ValueError(
f"{class_name} does not support multi-input tool {tool.name}."
)
|
from typing import Sequence
from langchain_core.tools import BaseTool
def validate_tools_single_input(class_name: str, tools: Sequence[BaseTool]) -> None:
"""Validate tools for single input.
Args:
class_name: Name of the class.
tools: List of tools to validate.
Raises:
ValueError: If a multi-input tool is found in tools.
"""
for tool in tools:
if not tool.is_single_input:
raise ValueError(
f"{class_name} does not support multi-input tool {tool.name}."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backends import (BaseStorageBackend, HTTPBackend, LmdbBackend,
LocalBackend, MemcachedBackend, PetrelBackend,
register_backend)
from .file_client import FileClient, HardDiskBackend
from .handlers import (BaseFileHandler, JsonHandler, PickleHandler,
YamlHandler, register_handler)
from .io import (copy_if_symlink_fails, copyfile, copyfile_from_local,
copyfile_to_local, copytree, copytree_from_local,
copytree_to_local, dump, exists, generate_presigned_url, get,
get_file_backend, get_local_path, get_text, isdir, isfile,
join_path, list_dir_or_file, load, put, put_text, remove,
rmtree)
from .parse import dict_from_file, list_from_file
__all__ = [
'BaseStorageBackend', 'FileClient', 'PetrelBackend', 'MemcachedBackend',
'LmdbBackend', 'HardDiskBackend', 'LocalBackend', 'HTTPBackend',
'copy_if_symlink_fails', 'copyfile', 'copyfile_from_local',
'copyfile_to_local', 'copytree', 'copytree_from_local',
'copytree_to_local', 'exists', 'generate_presigned_url', 'get',
'get_file_backend', 'get_local_path', 'get_text', 'isdir', 'isfile',
'join_path', 'list_dir_or_file', 'put', 'put_text', 'remove', 'rmtree',
'load', 'dump', 'register_handler', 'BaseFileHandler', 'JsonHandler',
'PickleHandler', 'YamlHandler', 'list_from_file', 'dict_from_file',
'register_backend'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .file_client import (BaseStorageBackend, FileClient, HardDiskBackend,
HTTPBackend, LmdbBackend, MemcachedBackend,
PetrelBackend)
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
from .io import dump, load, register_handler
from .parse import dict_from_file, list_from_file
__all__ = [
'BaseStorageBackend', 'FileClient', 'PetrelBackend', 'MemcachedBackend',
'LmdbBackend', 'HardDiskBackend', 'HTTPBackend', 'load', 'dump',
'register_handler', 'BaseFileHandler', 'JsonHandler', 'PickleHandler',
'YamlHandler', 'list_from_file', 'dict_from_file'
]
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import random
import pytest
import time
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
milvus_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker compose -f {milvus_yml} up -d --remove-orphans")
time.sleep(2)
yield
os.system(f"docker compose -f {milvus_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_index_name():
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for _ in range(15))
return random_string
|
import string
import random
import pytest
import time
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
milvus_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
os.system(f"docker compose -f {milvus_yml} up -d --remove-orphans")
time.sleep(2)
yield
os.system(f"docker compose -f {milvus_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_index_name():
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for _ in range(15))
return random_string
|
"""This module contains all classes used for composing graphs over indices."""
from llama_index.core.indices.composability.graph import ComposableGraph
__all__ = ["ComposableGraph"]
|
"""This module contains all classes used for composing graphs over indices."""
from llama_index.core.indices.composability.graph import ComposableGraph
__all__ = ["ComposableGraph"]
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator:
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
|
"""Tests for tf.distribute related functionality under tf implementation."""
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.tensorflow import trainer as tf_trainer
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class DistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
def test_variable_creation(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
dense = layers.Dense(2)
dense.build([4, 2])
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
self.assertIn("MirroredVariable", dense.kernel.value.__class__.__name__)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(dense.bias.value, tf.distribute.DistributedValues)
self.assertIn("MirroredVariable", dense.bias.value.__class__.__name__)
def test_strategy_run(self):
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
inputs = layers.Input(shape=[4])
dense = layers.Dense(2)
output = dense(inputs)
model = models.Functional(inputs, output)
self.assertIsInstance(dense.kernel, backend.Variable)
self.assertIsInstance(
dense.kernel.value, tf.distribute.DistributedValues
)
def input_fn(ctx):
if ctx.replica_id_in_sync_group == 1:
return tf.ones([8, 4])
else:
return tf.zeros([8, 4])
distributed_inputs = (
strategy.experimental_distribute_values_from_function(input_fn)
)
@tf.function
def run_fn(data):
return model(data)
result = strategy.run(run_fn, args=(distributed_inputs,))
self.assertIsInstance(
result, tf.types.experimental.distributed.PerReplica
)
self.assertLen(result.values, 2)
self.assertEqual(result.values[0].shape, [8, 2])
self.assertEqual(result.values[1].shape, [8, 2])
self.assertNotAllClose(result.values[0], result.values[1])
self.assertAllClose(result.values[0], tf.zeros([8, 2]))
def test_epoch_iterator(self):
x = np.random.random((100, 16))
y = np.random.random((100, 4))
sample_weight = np.random.random((100,))
batch_size = 16
shuffle = True
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
epoch_iterator = tf_trainer.TFEpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
shuffle=shuffle,
distribute_strategy=strategy,
)
steps_seen = []
for step, data_iterator in epoch_iterator.enumerate_epoch():
steps_seen.append(step)
batch = next(data_iterator)
self.assertEqual(len(batch), 3)
x, y, sample_weight = batch
self.assertTrue(
isinstance(x, tf.types.experimental.distributed.PerReplica)
)
# Make sure the local batch size is 8
if step < 6:
self.assertEqual(x.values[0].shape, [8, 16])
self.assertEqual(y.values[0].shape, [8, 4])
self.assertEqual(sample_weight.values[0].shape, [8])
else:
# Last partial batch
self.assertEqual(x.values[0].shape, [2, 16])
self.assertEqual(y.values[0].shape, [2, 4])
self.assertEqual(sample_weight.values[0].shape, [2])
self.assertEqual(steps_seen, [0, 1, 2, 3, 4, 5, 6])
|
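A hedged sketch of the pattern the two test files above exercise, written as a standalone snippet rather than a test; the training call is an assumption about how the TF-backend trainer distributes data, not something those tests assert. Variables created inside the strategy scope become mirrored across the two logical CPUs, and a global batch of 16 is expected to be split into per-replica batches of 8, matching the comment in the epoch-iterator test.
import numpy as np
import tensorflow as tf
from keras.src import layers, models
strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
with strategy.scope():
    inputs = layers.Input(shape=[4])
    outputs = layers.Dense(2)(inputs)
    model = models.Functional(inputs, outputs)
    model.compile(optimizer="sgd", loss="mse")
x = np.random.random((32, 4)).astype("float32")
y = np.random.random((32, 2)).astype("float32")
# Assumed behaviour: fit() shards each global batch of 16 across the two replicas.
model.fit(x, y, batch_size=16, epochs=1, verbose=0)
|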
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composable_memory import SimpleComposableMemory
from llama_index.core.memory.memory import Memory
from llama_index.core.memory.memory_blocks import (
StaticMemoryBlock,
VectorMemoryBlock,
FactExtractionMemoryBlock,
)
__all__ = [
"BaseMemory",
"Memory",
"StaticMemoryBlock",
"VectorMemoryBlock",
"FactExtractionMemoryBlock",
# Deprecated
"ChatMemoryBuffer",
"ChatSummaryMemoryBuffer",
"SimpleComposableMemory",
"VectorMemory",
]
|
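A brief usage sketch for the deprecated-but-still-exported ChatMemoryBuffer above; it assumes the public llama_index.core API (ChatMessage, from_defaults, put, get) matches current releases and is not taken from the module itself.
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import ChatMemoryBuffer
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
memory.put(ChatMessage(role="user", content="hello"))
memory.put(ChatMessage(role="assistant", content="hi there"))
# Returns the buffered messages that fit within the configured token limit.
history = memory.get()
|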
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composable_memory import SimpleComposableMemory
__all__ = [
"BaseMemory",
"ChatMemoryBuffer",
"ChatSummaryMemoryBuffer",
"SimpleComposableMemory",
"VectorMemory",
]
|
__version__ = '0.14.3'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.2'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
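A small sketch of how a config like the one above is typically consumed; the file path is hypothetical and the attribute lookups are assumptions based on MMEngine's Config loader rather than contents of the config itself. Config.fromfile resolves the `_base_` inheritance chain and exposes every top-level field.
from mmengine.config import Config
cfg = Config.fromfile('configs/common/ms_3x_coco.py')  # hypothetical path for illustration
print(cfg.train_dataloader['batch_size'])      # 2
print(cfg.optim_wrapper['optimizer']['lr'])    # 0.02
print(cfg.param_scheduler[1]['milestones'])    # [9, 11]
|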
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] has higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable` means enable scaling LR automatically
# or not by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
import os
import sys
from typing import Iterator, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray import Document
file_dir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(file_dir))
def random_docs(
num_docs,
chunks_per_doc=5,
embed_dim=10,
jitter=1,
start_id=0,
embedding=True,
sparse_embedding=False,
text='hello world',
) -> Iterator['Document']:
from docarray import Document
next_chunk_doc_id = start_id + num_docs
for j in range(num_docs):
doc_id = str(start_id + j)
d = Document(id=doc_id)
d.text = text
d.tags['id'] = doc_id
if embedding:
if sparse_embedding:
from scipy.sparse import coo_matrix
d.embedding = coo_matrix(
(np.array([1, 1, 1]), (np.array([0, 1, 2]), np.array([1, 2, 1])))
)
else:
d.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
for _ in range(chunks_per_doc):
chunk_doc_id = str(next_chunk_doc_id)
c = Document(id=chunk_doc_id)
c.text = 'i\'m chunk %s from doc %s' % (chunk_doc_id, doc_id)
if embedding:
c.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
c.tags['parent_id'] = doc_id
c.tags['id'] = chunk_doc_id
d.chunks.append(c)
next_chunk_doc_id += 1
yield d
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
|
import os
import sys
from typing import Iterator, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from jina import Document
file_dir = os.path.dirname(__file__)
sys.path.append(os.path.dirname(file_dir))
def random_docs(
num_docs,
chunks_per_doc=5,
embed_dim=10,
jitter=1,
start_id=0,
embedding=True,
sparse_embedding=False,
text='hello world',
) -> Iterator['Document']:
from docarray import Document
next_chunk_doc_id = start_id + num_docs
for j in range(num_docs):
doc_id = str(start_id + j)
d = Document(id=doc_id)
d.text = text
d.tags['id'] = doc_id
if embedding:
if sparse_embedding:
from scipy.sparse import coo_matrix
d.embedding = coo_matrix(
(np.array([1, 1, 1]), (np.array([0, 1, 2]), np.array([1, 2, 1])))
)
else:
d.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
for _ in range(chunks_per_doc):
chunk_doc_id = str(next_chunk_doc_id)
c = Document(id=chunk_doc_id)
c.text = 'i\'m chunk %s from doc %s' % (chunk_doc_id, doc_id)
if embedding:
c.embedding = np.random.random(
[embed_dim + np.random.randint(0, jitter)]
)
c.tags['parent_id'] = doc_id
c.tags['id'] = chunk_doc_id
d.chunks.append(c)
next_chunk_doc_id += 1
yield d
def validate_callback(mock, validate_func):
for args, kwargs in mock.call_args_list:
validate_func(*args, **kwargs)
mock.assert_called()
|
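A tiny usage sketch, not present in either helper file above, showing how the generator could be materialized in a test; DocumentArray comes from docarray, consistent with the `from docarray import Document` import inside the helper.
from docarray import DocumentArray
da = DocumentArray(random_docs(num_docs=3, chunks_per_doc=2))
assert len(da) == 3
assert all(len(d.chunks) == 2 for d in da)
assert all(d.embedding is not None for d in da)
|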
import pytest
from docarray import Document, DocumentArray
from jina import Executor, requests
from jina.clients.request import request_generator
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.request_handlers.data_request_handler import DataRequestHandler
class NewDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
return DocumentArray([Document(text='new document')])
class AsyncNewDocsExecutor(Executor):
@requests
async def foo(self, docs, **kwargs):
return DocumentArray([Document(text='new document')])
class ChangeDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'changed document'
class MergeChangeDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'changed document'
return docs
class ClearDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
docs.clear()
@pytest.fixture()
def logger():
return JinaLogger('data request handler')
@pytest.mark.asyncio
async def test_data_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'NewDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_async_data_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'AsyncNewDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_data_request_handler_change_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ChangeDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 10
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_data_request_handler_change_docs_from_partial_requests(logger):
NUM_PARTIAL_REQUESTS = 5
args = set_pod_parser().parse_args(['--uses', 'MergeChangeDocsExecutor'])
handler = DataRequestHandler(args, logger)
partial_reqs = [
list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
] * NUM_PARTIAL_REQUESTS
assert len(partial_reqs) == 5
assert len(partial_reqs[0].docs) == 10
response = await handler.handle(requests=partial_reqs)
assert len(response.docs) == 10 * NUM_PARTIAL_REQUESTS
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_data_request_handler_clear_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ClearDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 0
|
import pytest
from docarray import Document, DocumentArray
from jina import Executor, requests
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.request_handlers.data_request_handler import (
DataRequestHandler,
)
from jina.clients.request import request_generator
class NewDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
return DocumentArray([Document(text='new document')])
class AsyncNewDocsExecutor(Executor):
@requests
async def foo(self, docs, **kwargs):
return DocumentArray([Document(text='new document')])
class ChangeDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'changed document'
class MergeChangeDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'changed document'
return docs
class ClearDocsExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
docs.clear()
@pytest.fixture()
def logger():
return JinaLogger('data request handler')
@pytest.mark.asyncio
async def test_data_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'NewDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_async_data_request_handler_new_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'AsyncNewDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 1
assert response.docs[0].text == 'new document'
@pytest.mark.asyncio
async def test_data_request_handler_change_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ChangeDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 10
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_data_request_handler_change_docs_from_partial_requests(logger):
NUM_PARTIAL_REQUESTS = 5
args = set_pod_parser().parse_args(['--uses', 'MergeChangeDocsExecutor'])
handler = DataRequestHandler(args, logger)
partial_reqs = [
list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
] * NUM_PARTIAL_REQUESTS
assert len(partial_reqs) == 5
assert len(partial_reqs[0].docs) == 10
response = await handler.handle(requests=partial_reqs)
assert len(response.docs) == 10 * NUM_PARTIAL_REQUESTS
for doc in response.docs:
assert doc.text == 'changed document'
@pytest.mark.asyncio
async def test_data_request_handler_clear_docs(logger):
args = set_pod_parser().parse_args(['--uses', 'ClearDocsExecutor'])
handler = DataRequestHandler(args, logger)
req = list(
request_generator(
'/', DocumentArray([Document(text='input document') for _ in range(10)])
)
)[0]
assert len(req.docs) == 10
response = await handler.handle(requests=[req])
assert len(response.docs) == 0
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
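A short numeric check, illustrative only, that mirrors the docstring example above: after the layer is applied, each row of the input has (approximately) unit L2 norm along the last axis.
import numpy as np
from keras.src import layers
data = np.arange(6, dtype="float32").reshape(2, 3)
normalized = layers.UnitNormalization()(data)
print(np.sum(normalized[0, :] ** 2))  # ~1.0
print(np.sum(normalized[1, :] ** 2))  # ~1.0
|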
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
"""Unit normalization layer.
Normalize a batch of inputs so that each input in the batch has a L2 norm
equal to 1 (across the axes specified in `axis`).
Example:
>>> data = np.arange(6).reshape(2, 3)
>>> normalized_data = keras.layers.UnitNormalization()(data)
>>> np.sum(normalized_data[0, :] ** 2)
1.0
Args:
axis: Integer or list/tuple. The axis or axes to normalize across.
Typically, this is the features axis or axes. The left-out axes are
typically the batch axis or axes. `-1` is the last dimension
in the input. Defaults to `-1`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Invalid value for `axis` argument: "
"expected an int or a list/tuple of ints. "
f"Received: axis={axis}"
)
self.supports_masking = True
def build(self, input_shape):
self.built = True
def call(self, inputs):
x = ops.cast(inputs, self.compute_dtype)
square_sum = ops.sum(ops.square(x), axis=self.axis, keepdims=True)
x_inv_norm = ops.rsqrt(ops.maximum(square_sum, 1e-12))
return ops.multiply(x, x_inv_norm)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
|
import posixpath
from pathlib import Path
import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
@pytest.fixture
def mock_fsspec():
original_registry = fsspec.registry.copy()
fsspec.register_implementation("mock", MockFileSystem)
yield
fsspec.registry = original_registry
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
|
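A hypothetical test, not part of the conftest above, that exercises the mock filesystem: the "mock://" protocol is stripped and every path is remapped under the temporary local root created by the `mockfs` fixture (auto_mkdir=True lets the write create parent directories).
def test_mockfs_roundtrip(mockfs):
    with mockfs.open("mock://data/hello.txt", "w") as f:
        f.write("hello")
    assert mockfs.exists("data/hello.txt")
    assert "data/hello.txt" in mockfs.ls("data", detail=False)
|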
import posixpath
from pathlib import Path
import fsspec
import pytest
from fsspec.implementations.local import AbstractFileSystem, LocalFileSystem, stringify_path
class MockFileSystem(AbstractFileSystem):
protocol = "mock"
def __init__(self, *args, local_root_dir, **kwargs):
super().__init__()
self._fs = LocalFileSystem(*args, **kwargs)
self.local_root_dir = Path(local_root_dir).resolve().as_posix() + "/"
def mkdir(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.mkdir(path, *args, **kwargs)
def makedirs(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.makedirs(path, *args, **kwargs)
def rmdir(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rmdir(path)
def ls(self, path, detail=True, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = self._fs.ls(path, detail=detail, *args, **kwargs)
if detail:
return [{**info, "name": info["name"][len(self.local_root_dir) :]} for info in out]
else:
return [name[len(self.local_root_dir) :] for name in out]
def info(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
out = dict(self._fs.info(path, *args, **kwargs))
out["name"] = out["name"][len(self.local_root_dir) :]
return out
def cp_file(self, path1, path2, *args, **kwargs):
path1 = posixpath.join(self.local_root_dir, self._strip_protocol(path1))
path2 = posixpath.join(self.local_root_dir, self._strip_protocol(path2))
return self._fs.cp_file(path1, path2, *args, **kwargs)
def rm_file(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm_file(path, *args, **kwargs)
def rm(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.rm(path, *args, **kwargs)
def _open(self, path, *args, **kwargs):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs._open(path, *args, **kwargs)
def created(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.created(path)
def modified(self, path):
path = posixpath.join(self.local_root_dir, self._strip_protocol(path))
return self._fs.modified(path)
@classmethod
def _strip_protocol(cls, path):
path = stringify_path(path)
if path.startswith("mock://"):
path = path[7:]
return path
@pytest.fixture
def mock_fsspec(monkeypatch):
monkeypatch.setitem(fsspec.registry.target, "mock", MockFileSystem)
@pytest.fixture
def mockfs(tmp_path_factory, mock_fsspec):
local_fs_dir = tmp_path_factory.mktemp("mockfs")
return MockFileSystem(local_root_dir=local_fs_dir, auto_mkdir=True)
|
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Food101(VisionDataset):
"""`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.
The Food-101 is a challenging data set of 101 food categories with 101,000 images.
For each class, 250 manually reviewed test images are provided as well as 750 training images.
On purpose, the training images were not cleaned, and thus still contain some amount of noise.
This comes mostly in the form of intense colors and sometimes wrong labels. All images were
rescaled to have a maximum side length of 512 pixels.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
_MD5 = "85eeb15f3717b99a5da872d97d918f87"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = Path(self.root) / "food-101"
self._meta_folder = self._base_folder / "meta"
self._images_folder = self._base_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._labels = []
self._image_files = []
with open(self._meta_folder / f"{split}.json") as f:
metadata = json.loads(f.read())
self.classes = sorted(metadata.keys())
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
for class_label, im_rel_paths in metadata.items():
self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
self._image_files += [
self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
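A usage sketch for the dataset class defined above; the root directory is an assumption, and download=True fetches the multi-gigabyte archive on first use. It wires the dataset to standard torchvision transforms and a DataLoader.
import torch
from torchvision import transforms
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
dataset = Food101(root="data", split="train", download=True, transform=transform)
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
images, labels = next(iter(loader))
print(images.shape)  # torch.Size([32, 3, 224, 224])
|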
import json
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class Food101(VisionDataset):
"""`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_.
The Food-101 is a challenging data set of 101 food categories with 101,000 images.
For each class, 250 manually reviewed test images are provided as well as 750 training images.
On purpose, the training images were not cleaned, and thus still contain some amount of noise.
This comes mostly in the form of intense colors and sometimes wrong labels. All images were
rescaled to have a maximum side length of 512 pixels.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz"
_MD5 = "85eeb15f3717b99a5da872d97d918f87"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = Path(self.root) / "food-101"
self._meta_folder = self._base_folder / "meta"
self._images_folder = self._base_folder / "images"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._labels = []
self._image_files = []
with open(self._meta_folder / f"{split}.json") as f:
metadata = json.loads(f.read())
self.classes = sorted(metadata.keys())
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
for class_label, im_rel_paths in metadata.items():
self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths)
self._image_files += [
self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder))
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else models.SearchParams(**search_params),
limit=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, as any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=models.Filter(**filter),
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
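A heavily hedged end-to-end sketch of how the mixin above is reached in practice; the storage backend name, the `n_dim` config key, and the need for a running Qdrant instance are assumptions based on docarray's document-store API, not statements from the file itself.
import numpy as np
from docarray import Document, DocumentArray
# Assumes a Qdrant server is reachable on the default host/port.
da = DocumentArray(storage='qdrant', config={'n_dim': 10})
da.extend(Document(embedding=np.random.random(10)) for _ in range(100))
# `find` dispatches to `_find` above: one query vector yields one result set,
# while a (num_queries, n_dim) batch yields one DocumentArray per row.
matches = da.find(np.random.random(10), limit=5)
|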
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
from docarray import Document, DocumentArray
from docarray.math import ndarray
from docarray.score import NamedScore
from qdrant_client.http import models as rest
from qdrant_client.http.models.models import Distance
if TYPE_CHECKING: # pragma: no cover
import numpy as np
import tensorflow
import torch
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self,
q: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None
if not search_params
else rest.SearchParams(**search_params),
limit=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
search_params: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
        :param query: input query, as any array type supported by Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:param search_params: additional parameters of the search
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [
self._find_similar_vectors(
query, limit=limit, filter=filter, search_params=search_params
)
]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(
q, limit=limit, filter=filter, search_params=search_params
)
closest_docs.append(da)
return closest_docs
def _find_with_filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
):
list_of_points, _offset = self.client.scroll(
collection_name=self.collection_name,
scroll_filter=rest.Filter(**filter),
with_payload=True,
limit=limit,
)
da = DocumentArray()
for result in list_of_points[:limit]:
doc = Document.from_base64(
result.payload['_serialized'], **self.serialize_config
)
da.append(doc)
return da
def _filter(
self, filter: Optional[Dict], limit: Optional[Union[int, float]] = 10
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (`Qdrant` filter)..
:param limit: number of retrieved items
:param filter: filter query used for filtering.
For more information: https://docarray.jina.ai/advanced/document-store/qdrant/#qdrant
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
return self._find_with_filter(filter, limit=limit)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/3.x/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re
import numpy as np
url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs'
files = sorted(glob.glob('../../configs/*/README.md'))
stats = []
titles = []
num_ckpts = 0
for f in files:
url = osp.dirname(f.replace('../../configs', url_prefix))
with open(f, 'r') as content_file:
content = content_file.read()
title = content.split('\n')[0].replace('# ', '').strip()
ckpts = set(x.lower().strip()
for x in re.findall(r'\[model\]\((https?.*)\)', content))
if len(ckpts) == 0:
continue
_papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
assert len(_papertype) > 0
papertype = _papertype[0]
paper = set([(papertype, title)])
titles.append(title)
num_ckpts += len(ckpts)
statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
stats.append((paper, ckpts, statsmsg))
allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)
papertypes, papercounts = np.unique([t for t, _ in allpapers],
return_counts=True)
countstr = '\n'.join(
[f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])
modelzoo = f"""
# Model Zoo Statistics
* Number of papers: {len(set(titles))}
{countstr}
* Number of checkpoints: {num_ckpts}
{msglist}
"""
with open('modelzoo_statistics.md', 'w') as f:
f.write(modelzoo)
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(by_epoch=True, max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""Retrieval evaluators."""
from typing import List, Optional, Tuple
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.bridge.pydantic import Field, SerializeAsAny
from llama_index.core.evaluation.retrieval.base import (
BaseRetrievalEvaluator,
RetrievalEvalMode,
)
from llama_index.core.indices.base_retriever import BaseRetriever
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import ImageNode, TextNode
class RetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
default=None, description="Optional post-processor"
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts, potentially applying a post-processor."""
retrieved_nodes = await self.retriever.aretrieve(query)
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
return (
[node.node.node_id for node in retrieved_nodes],
[node.text for node in retrieved_nodes],
)
class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
default=None, description="Optional post-processor"
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids."""
retrieved_nodes = await self.retriever.aretrieve(query)
image_nodes: List[ImageNode] = []
text_nodes: List[TextNode] = []
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
for scored_node in retrieved_nodes:
node = scored_node.node
if isinstance(node, ImageNode):
image_nodes.append(node)
if isinstance(node, TextNode):
text_nodes.append(node)
if mode == "text":
return (
[node.node_id for node in text_nodes],
[node.text for node in text_nodes],
)
elif mode == "image":
return (
[node.node_id for node in image_nodes],
[node.text for node in image_nodes],
)
else:
raise ValueError("Unsupported mode.")
|
"""Retrieval evaluators."""
from typing import List, Optional, Tuple
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.bridge.pydantic import Field, SerializeAsAny
from llama_index.core.evaluation.retrieval.base import (
BaseRetrievalEvaluator,
RetrievalEvalMode,
)
from llama_index.core.indices.base_retriever import BaseRetriever
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.schema import ImageNode, TextNode
class RetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
default=None, description="Optional post-processor"
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids and texts, potentially applying a post-processor."""
retrieved_nodes = await self.retriever.aretrieve(query)
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
return (
[node.node.node_id for node in retrieved_nodes],
[node.node.text for node in retrieved_nodes],
)
class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator):
"""Retriever evaluator.
This module will evaluate a retriever using a set of metrics.
Args:
metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate
retriever: Retriever to evaluate.
node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval.
"""
retriever: BaseRetriever = Field(..., description="Retriever to evaluate")
node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field(
default=None, description="Optional post-processor"
)
async def _aget_retrieved_ids_and_texts(
self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT
) -> Tuple[List[str], List[str]]:
"""Get retrieved ids."""
retrieved_nodes = await self.retriever.aretrieve(query)
image_nodes: List[ImageNode] = []
text_nodes: List[TextNode] = []
if self.node_postprocessors:
for node_postprocessor in self.node_postprocessors:
retrieved_nodes = node_postprocessor.postprocess_nodes(
retrieved_nodes, query_str=query
)
for scored_node in retrieved_nodes:
node = scored_node.node
if isinstance(node, ImageNode):
image_nodes.append(node)
if node.text:
text_nodes.append(node)
if mode == "text":
return (
[node.node_id for node in text_nodes],
[node.text for node in text_nodes],
)
elif mode == "image":
return (
[node.node_id for node in image_nodes],
[node.text for node in image_nodes],
)
else:
raise ValueError("Unsupported mode.")
|
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
import numpy as np
from docarray import Document, DocumentArray
from docarray.array.mixins.find import FindMixin as BaseFindMixin
from docarray.math import ndarray
from docarray.math.ndarray import to_numpy_array
from docarray.score import NamedScore
from redis.commands.search.query import Query
from redis.commands.search.querystring import (
DistjunctUnion,
IntersectNode,
equal,
ge,
gt,
intersect,
le,
lt,
union,
)
if TYPE_CHECKING:
import tensorflow
import torch
RedisArrayType = TypeVar(
'RedisArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
Dict,
)
class FindMixin(BaseFindMixin):
def _find_similar_vectors(
self,
query: 'RedisArrayType',
filter: Optional[Dict] = None,
limit: int = 20,
**kwargs,
):
if filter:
nodes = _build_query_nodes(filter)
query_str = intersect(*nodes).to_string()
else:
query_str = '*'
q = (
Query(f'({query_str})=>[KNN {limit} @embedding $vec AS vector_score]')
.sort_by('vector_score')
.paging(0, limit)
.dialect(2)
)
query_params = {'vec': to_numpy_array(query).astype(np.float32).tobytes()}
results = (
self._client.ft(index_name=self._config.index_name)
.search(q, query_params)
.docs
)
da = DocumentArray()
for res in results:
doc = Document.from_base64(res.blob.encode())
doc.scores['score'] = NamedScore(value=res.vector_score)
da.append(doc)
return da
def _find(
self,
query: 'RedisArrayType',
limit: int = 20,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
query = np.array(query)
num_rows, n_dim = ndarray.get_array_rows(query)
if n_dim != 2:
query = query.reshape((num_rows, -1))
return [
self._find_similar_vectors(q, filter=filter, limit=limit, **kwargs)
for q in query
]
def _find_with_filter(self, filter: Dict, limit: int = 20):
nodes = _build_query_nodes(filter)
query_str = intersect(*nodes).to_string()
q = Query(query_str)
q.paging(0, limit)
results = self._client.ft(index_name=self._config.index_name).search(q).docs
da = DocumentArray()
for res in results:
doc = Document.from_base64(res.blob.encode())
da.append(doc)
return da
def _filter(self, filter: Dict, limit: int = 20) -> 'DocumentArray':
return self._find_with_filter(filter, limit=limit)
def _build_query_node(key, condition):
operator = list(condition.keys())[0]
value = condition[operator]
query_dict = {}
if operator in ['$ne', '$eq']:
if isinstance(value, bool):
query_dict[key] = equal(int(value))
elif isinstance(value, (int, float)):
query_dict[key] = equal(value)
else:
query_dict[key] = value
elif operator == '$gt':
query_dict[key] = gt(value)
elif operator == '$gte':
query_dict[key] = ge(value)
elif operator == '$lt':
query_dict[key] = lt(value)
elif operator == '$lte':
query_dict[key] = le(value)
else:
raise ValueError(
f'Expecting filter operator one of $gt, $gte, $lt, $lte, $eq, $ne, $and OR $or, got {operator} instead'
)
if operator == '$ne':
return DistjunctUnion(**query_dict)
return IntersectNode(**query_dict)
def _build_query_nodes(filter):
nodes = []
for k, v in filter.items():
if k == '$and':
children = _build_query_nodes(v)
node = intersect(*children)
nodes.append(node)
elif k == '$or':
children = _build_query_nodes(v)
node = union(*children)
nodes.append(node)
else:
child = _build_query_node(k, v)
nodes.append(child)
return nodes
|
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, TypeVar, Union
import numpy as np
from docarray import Document, DocumentArray
from docarray.array.mixins.find import FindMixin as BaseFindMixin
from docarray.math import ndarray
from docarray.math.ndarray import to_numpy_array
from docarray.score import NamedScore
from redis.commands.search.query import NumericFilter, Query
if TYPE_CHECKING:
import tensorflow
import torch
RedisArrayType = TypeVar(
'RedisArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
Dict,
)
class FindMixin(BaseFindMixin):
def _find_similar_vectors(
self,
query: 'RedisArrayType',
filter: Optional[Dict] = None,
limit: Optional[Union[int, float]] = 20,
**kwargs,
):
query_str = self._build_query_str(filter) if filter else "*"
q = (
Query(f'{query_str}=>[KNN {limit} @embedding $vec AS vector_score]')
.sort_by('vector_score')
.paging(0, limit)
.dialect(2)
)
query_params = {'vec': to_numpy_array(query).astype(np.float32).tobytes()}
results = (
self._client.ft(index_name=self._config.index_name)
.search(q, query_params)
.docs
)
da = DocumentArray()
for res in results:
doc = Document.from_base64(res.blob.encode())
doc.scores['score'] = NamedScore(value=res.vector_score)
da.append(doc)
return da
def _find(
self,
query: 'RedisArrayType',
limit: Optional[Union[int, float]] = 20,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
query = np.array(query)
num_rows, n_dim = ndarray.get_array_rows(query)
if n_dim != 2:
query = query.reshape((num_rows, -1))
return [
self._find_similar_vectors(q, filter=filter, limit=limit, **kwargs)
for q in query
]
def _find_with_filter(self, filter: Dict, limit: Optional[Union[int, float]] = 20):
s = self._build_query_str(filter)
q = Query(s)
q.paging(0, limit)
results = self._client.ft(index_name=self._config.index_name).search(q).docs
da = DocumentArray()
for res in results:
doc = Document.from_base64(res.blob.encode())
da.append(doc)
return da
def _filter(
self, filter: Dict, limit: Optional[Union[int, float]] = 20
) -> 'DocumentArray':
return self._find_with_filter(filter, limit=limit)
def _build_query_str(self, filter: Dict) -> str:
INF = "+inf"
NEG_INF = "-inf"
s = "("
for key in filter:
operator = list(filter[key].keys())[0]
value = filter[key][operator]
if operator == '$gt':
s += f"@{key}:[({value} {INF}] "
elif operator == '$gte':
s += f"@{key}:[{value} {INF}] "
elif operator == '$lt':
s += f"@{key}:[{NEG_INF} ({value}] "
elif operator == '$lte':
s += f"@{key}:[{NEG_INF} {value}] "
elif operator == '$eq':
if type(value) is int:
s += f"@{key}:[{value} {value}] "
elif type(value) is bool:
s += f"@{key}:[{int(value)} {int(value)}] "
else:
s += f"@{key}:{value} "
elif operator == '$ne':
if type(value) is int:
s += f"-@{key}:[{value} {value}] "
elif type(value) is bool:
s += f"-@{key}:[{int(value)} {int(value)}] "
else:
s += f"-@{key}:{value} "
s += ")"
return s
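# --- hedged usage sketch (illustrative, not part of the original mixin) ---
# `_build_query_str` uses no instance state, so the filter-to-RediSearch mapping can be
# previewed directly; `_find_similar_vectors` then wraps the result in a KNN clause.
if __name__ == '__main__':
    example_filter = {'price': {'$lt': 10}, 'color': {'$eq': 'red'}}
    print(FindMixin._build_query_str(None, example_filter))
    # -> "(@price:[-inf (10] @color:red )"
    # the hybrid query string used for vector search then becomes:
    # "(@price:[-inf (10] @color:red )=>[KNN 20 @embedding $vec AS vector_score]"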
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
            featmap_strides (list[int]): The stride of each input feature map
                w.r.t. the original image size, which is used to scale RoI
                coordinates from the original image coordinate system to the
                feature coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from mmcv import ops
from mmengine.model import BaseModule
from torch import Tensor
from mmdet.utils import ConfigType, OptMultiConfig
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
dict], optional): Initialization config dict. Defaults to None.
"""
def __init__(self,
roi_layer: ConfigType,
out_channels: int,
featmap_strides: List[int],
init_cfg: OptMultiConfig = None) -> None:
super().__init__(init_cfg=init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self) -> int:
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg: ConfigType,
featmap_strides: List[int]) -> nn.ModuleList:
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
config RoI layer operation. Options are modules under
``mmcv/ops`` such as ``RoIAlign``.
            featmap_strides (list[int]): The stride of each input feature map
                w.r.t. the original image size, which is used to scale RoI
                coordinates from the original image coordinate system to the
                feature coordinate system.
Returns:
:obj:`nn.ModuleList`: The RoI extractor modules for each level
feature map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois: Tensor, scale_factor: float) -> Tensor:
"""Scale RoI coordinates by scale factor.
Args:
rois (Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self,
feats: Tuple[Tensor],
rois: Tensor,
roi_scale_factor: Optional[float] = None) -> Tensor:
"""Extractor ROI feats.
Args:
feats (Tuple[Tensor]): Multi-scale features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
roi_scale_factor (Optional[float]): RoI scale factor.
Defaults to None.
Returns:
Tensor: RoI feature.
"""
pass
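# --- hedged usage sketch (illustrative, not part of the original module) ---
# `roi_rescale` uses no instance state, so the centre-preserving rescale can be checked
# without a concrete subclass: a 10x20 box scaled by 2.0 keeps its centre and batch index
# while its width and height double.
if __name__ == '__main__':
    rois = torch.tensor([[0., 10., 10., 20., 30.]])  # (batch_idx, x1, y1, x2, y2)
    print(BaseRoIExtractor.roi_rescale(None, rois, 2.0))
    # -> tensor([[ 0.,  5.,  0., 25., 40.]])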
|
import os
import librosa
from jina import Executor, Document, DocumentArray
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
encoder = Executor.load_config(os.path.join(cur_dir, '../../config.yml'))
assert str(encoder.vgg_model_path).endswith('vggish_model.ckpt')
assert str(encoder.pca_model_path).endswith('vggish_pca_params.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape == (128,)
|
import os
import librosa
from jina import Executor, Document, DocumentArray
from tensorflow.python.framework import ops
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_load():
encoder = Executor.load_config(os.path.join(cur_dir, '../../config.yml'))
assert encoder.model_path.endswith('vggish_model.ckpt')
def test_embedding_dimension():
x_audio, sample_rate = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
ops.reset_default_graph()
model = VggishAudioEncoder()
model.encode(doc, parameters={})
assert doc[0].embedding.shape[-1] == 128
|
_base_ = [
'../_base_/models/cascade-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 128.
max_gen_len (int, optional): The maximum length of generated sequences. Defaults to 64.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 4.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 128,
max_gen_len: int = 64,
max_batch_size: int = 4,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
prompts = [
# For these prompts, the expected answer is the natural continuation of the prompt
"I believe the meaning of life is",
"Simply put, the theory of relativity states that ",
"""A brief message congratulating the team on the launch:
Hi everyone,
I just """,
# Few shot prompt (providing a few examples before asking model to complete more);
"""Translate English to French:
sea otter => loutre de mer
peppermint => menthe poivrée
plush girafe => girafe peluche
cheese =>""",
]
results = generator.text_completion(
prompts,
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for prompt, result in zip(prompts, results):
print(prompt)
print(f"> {result['generation']}")
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/')))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
model = dict(
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
image_size = (1024, 1024)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
# large scale jittering
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='Pad', size=image_size, pad_val=pad_cfg),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle', img_to_float=True),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32, pad_val=pad_cfg),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
_delete_=True,
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
    Associative Embedding Loss includes two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each other.
    Push loss distinguishes embedding vectors from different objects, and makes
    the gap between them large enough.
    During computation, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
          by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner pairs
          from each object, push loss is computed by each object with all
          other objects. We use a confusion matrix with 0 on the diagonal to
          compute the push loss.
    Args:
        tl_preds (tensor): Embedding feature map of the top-left corner.
        br_preds (tensor): Embedding feature map of the bottom-right corner.
        match (list): Downsampled coordinate pairs of each ground truth box.
"""
tl_list, br_list, me_list = [], [], []
if len(match) == 0: # no object in image
pull_loss = tl_preds.sum() * 0.
push_loss = tl_preds.sum() * 0.
else:
for m in match:
[tl_y, tl_x], [br_y, br_x] = m
tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
br_e = br_preds[:, br_y, br_x].view(-1, 1)
tl_list.append(tl_e)
br_list.append(br_e)
me_list.append((tl_e + br_e) / 2.0)
tl_list = torch.cat(tl_list)
br_list = torch.cat(br_list)
me_list = torch.cat(me_list)
assert tl_list.size() == br_list.size()
# N is object number in image, M is dimension of embedding vector
N, M = tl_list.size()
pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
pull_loss = pull_loss.sum() / N
margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
# confusion matrix of push loss
conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
conf_weight = 1 - torch.eye(N).type_as(me_list)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
if N > 1: # more than one object in current image
push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
else:
push_loss = tl_preds.sum() * 0.
return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
"""Associative Embedding Loss.
More details can be found in
`Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
`CornerNet <https://arxiv.org/abs/1808.01244>`_ .
Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
Args:
pull_weight (float): Loss weight for corners from same object.
push_weight (float): Loss weight for corners from different object.
"""
def __init__(self, pull_weight=0.25, push_weight=0.25):
super(AssociativeEmbeddingLoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
def forward(self, pred, target, match):
"""Forward function."""
batch = pred.size(0)
pull_all, push_all = 0.0, 0.0
for i in range(batch):
pull, push = ae_loss_per_image(pred[i], target[i], match[i])
pull_all += self.pull_weight * pull
push_all += self.push_weight * push
return pull_all, push_all
|
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
    Associative Embedding Loss includes two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each other.
    Push loss distinguishes embedding vectors from different objects, and makes
    the gap between them large enough.
    During computation, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
          by the two corners of the only object.
        - more than one object in image: pull loss is computed by corner pairs
          from each object, push loss is computed by each object with all
          other objects. We use a confusion matrix with 0 on the diagonal to
          compute the push loss.
    Args:
        tl_preds (tensor): Embedding feature map of the top-left corner.
        br_preds (tensor): Embedding feature map of the bottom-right corner.
        match (list): Downsampled coordinate pairs of each ground truth box.
"""
tl_list, br_list, me_list = [], [], []
if len(match) == 0: # no object in image
pull_loss = tl_preds.sum() * 0.
push_loss = tl_preds.sum() * 0.
else:
for m in match:
[tl_y, tl_x], [br_y, br_x] = m
tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
br_e = br_preds[:, br_y, br_x].view(-1, 1)
tl_list.append(tl_e)
br_list.append(br_e)
me_list.append((tl_e + br_e) / 2.0)
tl_list = torch.cat(tl_list)
br_list = torch.cat(br_list)
me_list = torch.cat(me_list)
assert tl_list.size() == br_list.size()
# N is object number in image, M is dimension of embedding vector
N, M = tl_list.size()
pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
pull_loss = pull_loss.sum() / N
margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
# confusion matrix of push loss
conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
conf_weight = 1 - torch.eye(N).type_as(me_list)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
if N > 1: # more than one object in current image
push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
else:
push_loss = tl_preds.sum() * 0.
return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
"""Associative Embedding Loss.
More details can be found in
`Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
`CornerNet <https://arxiv.org/abs/1808.01244>`_ .
Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
Args:
pull_weight (float): Loss weight for corners from same object.
push_weight (float): Loss weight for corners from different object.
"""
def __init__(self, pull_weight=0.25, push_weight=0.25):
super(AssociativeEmbeddingLoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
def forward(self, pred, target, match):
"""Forward function."""
batch = pred.size(0)
pull_all, push_all = 0.0, 0.0
for i in range(batch):
pull, push = ae_loss_per_image(pred[i], target[i], match[i])
pull_all += self.pull_weight * pull
push_all += self.push_weight * push
return pull_all, push_all
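# --- hedged usage sketch (illustrative, not part of the original module) ---
# With two objects whose top-left/bottom-right embeddings already agree, the pull term is
# zero and only the margin-based push term between the two object means remains. The
# shapes below (embedding dim 1, 4x4 feature map, batch size 1) are assumptions chosen
# purely for illustration.
if __name__ == '__main__':
    tl = torch.zeros(1, 1, 4, 4)
    br = torch.zeros(1, 1, 4, 4)
    tl[0, 0, 0, 0] = br[0, 0, 1, 1] = 1.0  # object A corners -> mean embedding 1.0
    tl[0, 0, 2, 2] = br[0, 0, 3, 3] = 1.5  # object B corners -> mean embedding 1.5
    match = [[([0, 0], [1, 1]), ([2, 2], [3, 3])]]
    loss = AssociativeEmbeddingLoss(pull_weight=1.0, push_weight=1.0)
    pull, push = loss(tl, br, match)
    print(pull.item(), push.item())  # pull = 0.0, push = 2 * relu(1 - 0.5) / 2 = 0.5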
|
from typing import Dict, Tuple, Optional, List
import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap
class SimpleIndexer(Executor):
"""
    A simple indexer that stores all the Document data together
    in a DocumentArrayMemmap object.
    To be used as a unified indexer, combining both indexing and searching.
"""
def __init__(
self,
index_file_name: str,
default_traversal_paths: Optional[List[str]] = None,
default_top_k: int = 5,
distance_metric: str = 'cosine',
**kwargs,
):
"""
Initializer function for the simple indexer
:param index_file_name: The file name for the index file
:param default_traversal_paths: The default traversal path that is used
if no traversal path is given in the parameters of the request.
This defaults to ['r'].
:param default_top_k: default value for the top_k parameter
:param distance_metric: The distance metric to be used for finding the
most similar embeddings. Either 'euclidean' or 'cosine'.
"""
super().__init__(**kwargs)
self._docs = DocumentArrayMemmap(self.workspace + f'/{index_file_name}')
self.default_traversal_paths = default_traversal_paths or ['r']
self.default_top_k = default_top_k
if distance_metric == 'cosine':
self.distance = _cosine
elif distance_metric == 'euclidean':
self.distance = _euclidean
else:
raise ValueError('This distance metric is not available!')
self._flush = True
self._docs_embeddings = None
@property
def index_embeddings(self):
if self._flush:
self._docs_embeddings = np.stack(self._docs.get_attributes('embedding'))
self._flush = False
return self._docs_embeddings
@requests(on='/index')
def index(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""All Documents to the DocumentArray
:param docs: the docs to add
:param parameters: the parameters dictionary
"""
if not docs: return
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
flat_docs = docs.traverse_flat(traversal_path)
self._docs.extend(flat_docs)
self._flush = True
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""Perform a vector similarity search and retrieve the full Document match
:param docs: the Documents to search with
:param parameters: the parameters for the search"""
if not docs: return
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
top_k = parameters.get('top_k', self.default_top_k)
flat_docs = docs.traverse_flat(traversal_path)
a = np.stack(flat_docs.get_attributes('embedding'))
b = self.index_embeddings
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = self.distance(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, int(top_k))
for _q, _ids, _dists in zip(flat_docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'] = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
@requests(on='/fill_embedding')
def fill_embedding(self, docs: DocumentArray, **kwargs):
"""retrieve embedding of Documents by id
:param docs: DocumentArray to search with
"""
if not docs: return
for doc in docs:
doc.embedding = self._docs[doc.id].embedding
def _ext_A(A):
nA, dim = A.shape
A_ext = np.ones((nA, dim * 3))
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = np.ones((dim * 3, nB))
B_ext[:dim] = (B ** 2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
|
from typing import Dict, Tuple, Optional, List
import numpy as np
from jina import Executor, DocumentArray, requests, Document
from jina.types.arrays.memmap import DocumentArrayMemmap
class SimpleIndexer(Executor):
"""
    A simple indexer that stores all the Document data together
    in a DocumentArrayMemmap object.
    To be used as a unified indexer, combining both indexing and searching.
"""
def __init__(
self,
index_file_name: str,
default_traversal_paths: Optional[List[str]] = None,
default_top_k: int = 5,
distance_metric: str = 'cosine',
**kwargs,
):
"""
Initializer function for the simple indexer
:param index_file_name: The file name for the index file
:param default_traversal_paths: The default traversal path that is used
if no traversal path is given in the parameters of the request.
This defaults to ['r'].
:param default_top_k: default value for the top_k parameter
:param distance_metric: The distance metric to be used for finding the
most similar embeddings. Either 'euclidean' or 'cosine'.
"""
super().__init__(**kwargs)
self._docs = DocumentArrayMemmap(self.workspace + f'/{index_file_name}')
self.default_traversal_paths = default_traversal_paths or ['r']
self.default_top_k = default_top_k
if distance_metric == 'cosine':
self.distance = _cosine
elif distance_metric == 'euclidean':
self.distance = _euclidean
else:
raise ValueError('This distance metric is not available!')
self._flush = True
self._docs_embeddings = None
@property
def index_embeddings(self):
if self._flush:
self._docs_embeddings = np.stack(self._docs.get_attributes('embedding'))
self._flush = False
return self._docs_embeddings
@requests(on='/index')
def index(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""All Documents to the DocumentArray
:param docs: the docs to add
:param parameters: the parameters dictionary
"""
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
flat_docs = docs.traverse_flat(traversal_path)
self._docs.extend(flat_docs)
self._flush = True
@requests(on='/search')
def search(self, docs: 'DocumentArray', parameters: Dict, **kwargs):
"""Perform a vector similarity search and retrieve the full Document match
:param docs: the Documents to search with
:param parameters: the parameters for the search"""
traversal_path = parameters.get('traversal_paths', self.default_traversal_paths)
top_k = parameters.get('top_k', self.default_top_k)
flat_docs = docs.traverse_flat(traversal_path)
a = np.stack(flat_docs.get_attributes('embedding'))
b = self.index_embeddings
q_emb = _ext_A(_norm(a))
d_emb = _ext_B(_norm(b))
dists = self.distance(q_emb, d_emb)
idx, dist = self._get_sorted_top_k(dists, int(top_k))
for _q, _ids, _dists in zip(flat_docs, idx, dist):
for _id, _dist in zip(_ids, _dists):
d = Document(self._docs[int(_id)], copy=True)
d.scores['cosine'] = 1 - _dist
_q.matches.append(d)
@staticmethod
def _get_sorted_top_k(
dist: 'np.array', top_k: int
) -> Tuple['np.ndarray', 'np.ndarray']:
if top_k >= dist.shape[1]:
idx = dist.argsort(axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx, axis=1)
else:
idx_ps = dist.argpartition(kth=top_k, axis=1)[:, :top_k]
dist = np.take_along_axis(dist, idx_ps, axis=1)
idx_fs = dist.argsort(axis=1)
idx = np.take_along_axis(idx_ps, idx_fs, axis=1)
dist = np.take_along_axis(dist, idx_fs, axis=1)
return idx, dist
@requests(on='/fill_embedding')
def fill_embedding(self, docs: DocumentArray, **kwargs):
"""retrieve embedding of Documents by id
:param docs: DocumentArray to search with
"""
for doc in docs:
doc.embedding = self._docs[doc.id].embedding
def _ext_A(A):
nA, dim = A.shape
A_ext = np.ones((nA, dim * 3))
A_ext[:, dim : 2 * dim] = A
A_ext[:, 2 * dim :] = A ** 2
return A_ext
def _ext_B(B):
nB, dim = B.shape
B_ext = np.ones((dim * 3, nB))
B_ext[:dim] = (B ** 2).T
B_ext[dim : 2 * dim] = -2.0 * B.T
del B
return B_ext
def _euclidean(A_ext, B_ext):
sqdist = A_ext.dot(B_ext).clip(min=0)
return np.sqrt(sqdist)
def _norm(A):
return A / np.linalg.norm(A, ord=2, axis=1, keepdims=True)
def _cosine(A_norm_ext, B_norm_ext):
return A_norm_ext.dot(B_norm_ext).clip(min=0) / 2
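# --- hedged numerical check (illustrative, not part of the original executor) ---
# The _ext_A/_ext_B augmentation turns pairwise squared euclidean distances into a single
# matrix product, since [1, a, a**2] . [b**2, -2b, 1]^T = sum((a - b)**2); on L2-normalised
# rows this equals 2 - 2*cos, so _cosine(...) returns the cosine distance 1 - cos.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    a, b = rng.random((3, 4)), rng.random((5, 4))
    expected_sq = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
    assert np.allclose(_euclidean(_ext_A(a), _ext_B(b)) ** 2, expected_sq)
    cos_sim = _norm(a) @ _norm(b).T
    assert np.allclose(_cosine(_ext_A(_norm(a)), _ext_B(_norm(b))), 1 - cos_sim)
    print('extended-matrix distance identities hold')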
|
from typing import Any, ForwardRef, Optional, Union
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (
(get_origin(x) in (list, tuple, dict, set, Union))
or is_typevar(x)
or (type(x) == ForwardRef)
or x == ID
):
return False
return issubclass(x, a_tuple)
|
from typing import Any, ForwardRef, Optional
from typing_extensions import get_origin
from typing_inspect import get_args, is_typevar, is_union_type
from docarray.typing.id import ID
from docarray.typing.tensor.abstract_tensor import AbstractTensor
def is_type_tensor(type_: Any) -> bool:
"""Return True if type is a type Tensor or an Optional Tensor type."""
return isinstance(type_, type) and issubclass(type_, AbstractTensor)
def is_tensor_union(type_: Any) -> bool:
"""Return True if type is a Union of type Tensors."""
is_union = is_union_type(type_)
if is_union is None:
return False
else:
return is_union and all(
(is_type_tensor(t) or issubclass(t, type(None))) for t in get_args(type_)
)
def change_cls_name(cls: type, new_name: str, scope: Optional[dict] = None) -> None:
"""Change the name of a class.
:param cls: the class to change the name of
:param new_name: the new name
:param scope: the scope in which the class is defined
"""
if scope:
scope[new_name] = cls
cls.__qualname__ = cls.__qualname__[: -len(cls.__name__)] + new_name
cls.__name__ = new_name
def safe_issubclass(x: type, a_tuple: type) -> bool:
"""
This is a modified version of the built-in 'issubclass' function to support non-class input.
Traditional 'issubclass' calls can result in a crash if the input is non-class type (e.g. list/tuple).
:param x: A class 'x'
:param a_tuple: A class, or a tuple of classes.
:return: A boolean value - 'True' if 'x' is a subclass of 'A_tuple', 'False' otherwise.
Note that if the origin of 'x' is a list or tuple, the function immediately returns 'False'.
"""
if (
(get_origin(x) in (list, tuple, dict, set))
or is_typevar(x)
or (type(x) == ForwardRef)
or x == ID
):
return False
return issubclass(x, a_tuple)
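# --- hedged usage sketch (illustrative, not part of the original module) ---
# safe_issubclass mirrors the built-in issubclass but returns False instead of raising for
# typing constructs such as parametrised generics, TypeVars and forward references.
if __name__ == '__main__':
    from typing import List, TypeVar
    print(safe_issubclass(bool, int))             # True: plain classes behave as usual
    print(safe_issubclass(List[int], list))       # False: parametrised generic (origin is list)
    print(safe_issubclass(TypeVar('T'), object))  # False: TypeVar input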
|
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './mask_rcnn_hrnetv2p_w40_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True
)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""Kept for backwards compatibility."""
from langchain_text_splitters import (
Language,
RecursiveCharacterTextSplitter,
TextSplitter,
Tokenizer,
TokenTextSplitter,
)
from langchain_text_splitters.base import split_text_on_tokens
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_text_splitters.html import ElementType, HTMLHeaderTextSplitter
from langchain_text_splitters.json import RecursiveJsonSplitter
from langchain_text_splitters.konlpy import KonlpyTextSplitter
from langchain_text_splitters.latex import LatexTextSplitter
from langchain_text_splitters.markdown import (
HeaderType,
LineType,
MarkdownHeaderTextSplitter,
MarkdownTextSplitter,
)
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.python import PythonCodeTextSplitter
from langchain_text_splitters.sentence_transformers import (
SentenceTransformersTokenTextSplitter,
)
from langchain_text_splitters.spacy import SpacyTextSplitter
__all__ = [
"CharacterTextSplitter",
"ElementType",
"HTMLHeaderTextSplitter",
"HeaderType",
"KonlpyTextSplitter",
"Language",
"LatexTextSplitter",
"LineType",
"MarkdownHeaderTextSplitter",
"MarkdownTextSplitter",
"NLTKTextSplitter",
"PythonCodeTextSplitter",
"RecursiveCharacterTextSplitter",
"RecursiveJsonSplitter",
"SentenceTransformersTokenTextSplitter",
"SpacyTextSplitter",
"TextSplitter",
"TokenTextSplitter",
"Tokenizer",
"split_text_on_tokens",
]
|
"""Kept for backwards compatibility."""
from langchain_text_splitters import (
Language,
RecursiveCharacterTextSplitter,
TextSplitter,
Tokenizer,
TokenTextSplitter,
)
from langchain_text_splitters.base import split_text_on_tokens
from langchain_text_splitters.character import CharacterTextSplitter
from langchain_text_splitters.html import ElementType, HTMLHeaderTextSplitter
from langchain_text_splitters.json import RecursiveJsonSplitter
from langchain_text_splitters.konlpy import KonlpyTextSplitter
from langchain_text_splitters.latex import LatexTextSplitter
from langchain_text_splitters.markdown import (
HeaderType,
LineType,
MarkdownHeaderTextSplitter,
MarkdownTextSplitter,
)
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.python import PythonCodeTextSplitter
from langchain_text_splitters.sentence_transformers import (
SentenceTransformersTokenTextSplitter,
)
from langchain_text_splitters.spacy import SpacyTextSplitter
__all__ = [
"TokenTextSplitter",
"TextSplitter",
"Tokenizer",
"Language",
"RecursiveCharacterTextSplitter",
"RecursiveJsonSplitter",
"LatexTextSplitter",
"PythonCodeTextSplitter",
"KonlpyTextSplitter",
"SpacyTextSplitter",
"NLTKTextSplitter",
"split_text_on_tokens",
"SentenceTransformersTokenTextSplitter",
"ElementType",
"HeaderType",
"LineType",
"HTMLHeaderTextSplitter",
"MarkdownHeaderTextSplitter",
"MarkdownTextSplitter",
"CharacterTextSplitter",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS,
OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, PARAM_SCHEDULERS,
RUNNER_CONSTRUCTORS, RUNNERS, TASK_UTILS, TRANSFORMS,
WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS', 'PARAM_SCHEDULERS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .registry import Registry, build_from_cfg
from .root import (DATA_SAMPLERS, DATASETS, HOOKS, MODELS,
OPTIMIZER_CONSTRUCTORS, OPTIMIZERS, RUNNER_CONSTRUCTORS,
RUNNERS, TASK_UTILS, TRANSFORMS, WEIGHT_INITIALIZERS)
__all__ = [
'Registry', 'build_from_cfg', 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'HOOKS',
'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', 'WEIGHT_INITIALIZERS',
'OPTIMIZERS', 'OPTIMIZER_CONSTRUCTORS', 'TASK_UTILS'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the ema parameters.
            The ema parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
gamma (int): Use a larger momentum early in training and gradually
            anneal to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.lerp_(source_param, momentum)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmengine.model import ExponentialMovingAverage
from torch import Tensor
from mmdet.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(ExponentialMovingAverage):
"""Exponential moving average (EMA) with exponential momentum strategy,
which is used in YOLOX.
Args:
model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating the ema parameters.
            The ema parameters are updated with the formula:
`averaged_param = (1-momentum) * averaged_param + momentum *
source_param`. Defaults to 0.0002.
gamma (int): Use a larger momentum early in training and gradually
            anneal to a smaller value to update the ema model smoothly. The
momentum is calculated as
`(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
Defaults to 2000.
interval (int): Interval between two updates. Defaults to 1.
device (torch.device, optional): If provided, the averaged model will
be stored on the :attr:`device`. Defaults to None.
update_buffers (bool): if True, it will compute running averages for
both the parameters and the buffers of the model. Defaults to
False.
"""
def __init__(self,
model: nn.Module,
momentum: float = 0.0002,
gamma: int = 2000,
interval=1,
device: Optional[torch.device] = None,
update_buffers: bool = False) -> None:
super().__init__(
model=model,
momentum=momentum,
interval=interval,
device=device,
update_buffers=update_buffers)
assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
self.gamma = gamma
def avg_func(self, averaged_param: Tensor, source_param: Tensor,
steps: int) -> None:
"""Compute the moving average of the parameters using the exponential
momentum strategy.
Args:
averaged_param (Tensor): The averaged parameters.
source_param (Tensor): The source parameters.
steps (int): The number of times the parameters have been
updated.
"""
momentum = (1 - self.momentum) * math.exp(
-float(1 + steps) / self.gamma) + self.momentum
averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum)
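# --- hedged illustration (not part of the original module) ---
# The effective update momentum starts near 1.0 and decays exponentially towards the
# configured floor (0.0002 by default), so early EMA updates track the source model
# closely and later updates become increasingly smooth. Values assume default settings.
if __name__ == '__main__':
    m0, gamma = 0.0002, 2000
    for step in (0, 500, 2000, 10000):
        eff = (1 - m0) * math.exp(-float(1 + step) / gamma) + m0
        print(step, round(eff, 4))
    # 0 -> ~0.9995, 500 -> ~0.7784, 2000 -> ~0.3678, 10000 -> ~0.0069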
|
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class JSONNodeParser(NodeParser):
"""
JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
        for node in nodes_with_progress:
            parsed = self.get_nodes_from_node(node)
            all_nodes.extend(parsed)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""
Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
|
"""JSON node parser."""
import json
from typing import Any, Dict, Generator, List, Optional, Sequence
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import NodeParser
from llama_index.core.node_parser.node_utils import build_nodes_from_splits
from llama_index.core.schema import BaseNode, MetadataMode, TextNode
from llama_index.core.utils import get_tqdm_iterable
class JSONNodeParser(NodeParser):
"""JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
        for node in nodes_with_progress:
            parsed = self.get_nodes_from_node(node)
            all_nodes.extend(parsed)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
|