input | output
---|---|
from typing import Any, Dict
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
When ``num_samples`` is larger than the size of the temporal dimension of the video, it
will sample frames based on nearest-neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (torch.Tensor,)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
|
from typing import Any, Dict
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2.utils import is_simple_tensor
class UniformTemporalSubsample(Transform):
"""[BETA] Uniformly subsample ``num_samples`` indices from the temporal dimension of the video.
.. v2betastatus:: UniformTemporalSubsample transform
Videos are expected to be of shape ``[..., T, C, H, W]`` where ``T`` denotes the temporal dimension.
When ``num_samples`` is larger than the size of the temporal dimension of the video, it
will sample frames based on nearest-neighbor interpolation.
Args:
num_samples (int): The number of equispaced samples to be selected
"""
_transformed_types = (is_simple_tensor, datapoints.Video)
def __init__(self, num_samples: int):
super().__init__()
self.num_samples = num_samples
def _transform(self, inpt: datapoints._VideoType, params: Dict[str, Any]) -> datapoints._VideoType:
return F.uniform_temporal_subsample(inpt, self.num_samples)
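A minimal usage sketch for the transform above (assumes a torchvision build that ships the v2 transforms beta; the tensor shape is illustrative):
import torch

video = torch.randn(8, 3, 224, 224)  # [T, C, H, W] with 8 frames
subsample = UniformTemporalSubsample(num_samples=4)
clip = subsample(video)  # 4 frames picked uniformly along the temporal dimension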
|
from __future__ import annotations
import json
import os
from typing import Callable
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward layer with activation function.
This layer takes a fixed-size sentence embedding and passes it through a feed-forward layer. It can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: PyTorch activation function applied to the output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function: Callable[[Tensor], Tensor] | None = nn.Tanh(),
init_weight: Tensor | None = None,
init_bias: Tensor | None = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = nn.Identity() if activation_function is None else activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward layer with activation function.
This layer takes a fixed-size sentence embedding and passes it through a feed-forward layer. It can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: PyTorch activation function applied to the output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
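A small usage sketch for the module above; the dimensions are placeholders:
import torch

dense = Dense(in_features=768, out_features=256)
features = {"sentence_embedding": torch.randn(2, 768)}
out = dense(features)["sentence_embedding"]  # shape (2, 256), tanh-activated by default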
|
"""Experiment with different models."""
from __future__ import annotations
from collections.abc import Sequence
from typing import Optional
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[list[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
If a prompt was provided when initializing the laboratory, this text will be
fed into the prompt. If no prompt was provided, the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
|
"""Experiment with different models."""
from __future__ import annotations
from typing import List, Optional, Sequence
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.utils.input import get_color_mapping, print_text
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
class ModelLaboratory:
"""A utility to experiment with and compare the performance of different models."""
def __init__(self, chains: Sequence[Chain], names: Optional[List[str]] = None):
"""Initialize the ModelLaboratory with chains to experiment with.
Args:
chains (Sequence[Chain]): A sequence of chains to experiment with.
Each chain must have exactly one input and one output variable.
names (Optional[List[str]]): Optional list of names corresponding to each chain.
If provided, its length must match the number of chains.
Raises:
ValueError: If any chain is not an instance of `Chain`.
ValueError: If a chain does not have exactly one input variable.
ValueError: If a chain does not have exactly one output variable.
ValueError: If the length of `names` does not match the number of chains.
"""
for chain in chains:
if not isinstance(chain, Chain):
raise ValueError(
"ModelLaboratory should now be initialized with Chains. "
"If you want to initialize with LLMs, use the `from_llms` method "
"instead (`ModelLaboratory.from_llms(...)`)"
)
if len(chain.input_keys) != 1:
raise ValueError(
"Currently only support chains with one input variable, "
f"got {chain.input_keys}"
)
if len(chain.output_keys) != 1:
raise ValueError(
"Currently only support chains with one output variable, "
f"got {chain.output_keys}"
)
if names is not None:
if len(names) != len(chains):
raise ValueError("Length of chains does not match length of names.")
self.chains = chains
chain_range = [str(i) for i in range(len(self.chains))]
self.chain_colors = get_color_mapping(chain_range)
self.names = names
@classmethod
def from_llms(
cls, llms: List[BaseLLM], prompt: Optional[PromptTemplate] = None
) -> ModelLaboratory:
"""Initialize the ModelLaboratory with LLMs and an optional prompt.
Args:
llms (List[BaseLLM]): A list of LLMs to experiment with.
prompt (Optional[PromptTemplate]): An optional prompt to use with the LLMs.
If provided, the prompt must contain exactly one input variable.
Returns:
ModelLaboratory: An instance of `ModelLaboratory` initialized with LLMs.
"""
if prompt is None:
prompt = PromptTemplate(input_variables=["_input"], template="{_input}")
chains = [LLMChain(llm=llm, prompt=prompt) for llm in llms]
names = [str(llm) for llm in llms]
return cls(chains, names=names)
def compare(self, text: str) -> None:
"""Compare model outputs on an input text.
If a prompt was provided when initializing the laboratory, this text will be
fed into the prompt. If no prompt was provided, the input text is the
entire prompt.
Args:
text: input text to run all models on.
"""
print(f"\033[1mInput:\033[0m\n{text}\n") # noqa: T201
for i, chain in enumerate(self.chains):
if self.names is not None:
name = self.names[i]
else:
name = str(chain)
print_text(name, end="\n")
output = chain.run(text)
print_text(output, color=self.chain_colors[str(i)], end="\n\n")
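A hedged usage sketch; it assumes the optional `langchain-openai` package is installed and an API key is configured, but any `BaseLLM` implementations can be compared the same way:
from langchain_openai import OpenAI

lab = ModelLaboratory.from_llms(
    [OpenAI(temperature=0), OpenAI(temperature=0.9)],  # two illustrative model configs
)
lab.compare("What color is a ripe banana?")  # prints each chain's name and output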
|
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'test-executor-torch': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
class DockerComposeServices:
healthy_status = 'healthy'
unhealthy_status = 'unhealthy'
def __init__(self, dump_path, timeout_second=30):
self.dump_path = dump_path
self.timeout_second = timeout_second
def __enter__(self):
subprocess.run(
f'docker-compose -f {self.dump_path} up --build -d --remove-orphans'.split(
' '
)
)
container_ids = (
subprocess.run(
f'docker-compose -f {self.dump_path} ps -q'.split(' '),
capture_output=True,
)
.stdout.decode("utf-8")
.split('\n')
)
container_ids.remove('') # remove empty return line
if not container_ids:
raise RuntimeError('docker-compose ps did not detect any launched containers')
client = docker.from_env()
init_time = time.time()
healthy = False
while time.time() - init_time < self.timeout_second:
if self._are_all_container_healthy(container_ids, client):
healthy = True
break
time.sleep(0.1)
if not healthy:
raise RuntimeError('Docker containers are not healthy')
@staticmethod
def _are_all_container_healthy(
container_ids: List[str], client: docker.client.DockerClient
) -> bool:
for id_ in container_ids:
status = client.containers.get(id_).attrs['State']['Health']['Status']
if status != DockerComposeServices.healthy_status:
return False
return True
def __exit__(self, exc_type, exc_val, exc_tb):
subprocess.run(
f'docker-compose -f {self.dump_path} down --remove-orphans'.split(' ')
)
|
import os
import subprocess
import time
from typing import List
import docker
import pytest
from jina.logging.logger import JinaLogger
client = docker.from_env()
cur_dir = os.path.dirname(__file__)
@pytest.fixture()
def test_dir() -> str:
return cur_dir
@pytest.fixture
def logger():
return JinaLogger('docker-compose-testing')
@pytest.fixture
def image_name_tag_map():
return {
'reload-executor': '0.13.1',
'test-executor': '0.13.1',
'executor-merger': '0.1.1',
'custom-gateway': '0.1.1',
'multiprotocol-gateway': '0.1.1',
'jinaai/jina': 'test-pip',
}
def build_docker_image(image_name, image_name_tag_map):
logger = JinaLogger('docker-compose-testing')
image_tag = image_name + ':' + image_name_tag_map[image_name]
image, build_logs = client.images.build(
path=os.path.join(cur_dir, image_name), tag=image_tag
)
for chunk in build_logs:
if 'stream' in chunk:
for line in chunk['stream'].splitlines():
logger.debug(line)
return image.tags[-1]
@pytest.fixture(autouse=True)
def set_test_pip_version():
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
del os.environ['JINA_GATEWAY_IMAGE']
@pytest.fixture(autouse=True)
def build_images(image_name_tag_map):
for image in image_name_tag_map.keys():
if image != 'jinaai/jina':
build_docker_image(image, image_name_tag_map)
@pytest.fixture
def docker_images(request, image_name_tag_map):
image_names = request.param
images = [
image_name + ':' + image_name_tag_map[image_name] for image_name in image_names
]
return images
class DockerComposeServices:
healthy_status = 'healthy'
unhealthy_status = 'unhealthy'
def __init__(self, dump_path, timeout_second=30):
self.dump_path = dump_path
self.timeout_second = timeout_second
def __enter__(self):
subprocess.run(
f'docker-compose -f {self.dump_path} up --build -d --remove-orphans'.split(
' '
)
)
container_ids = (
subprocess.run(
f'docker-compose -f {self.dump_path} ps -q'.split(' '),
capture_output=True,
)
.stdout.decode("utf-8")
.split('\n')
)
container_ids.remove('') # remove empty return line
if not container_ids:
raise RuntimeError('docker-compose ps did not detect any launched containers')
client = docker.from_env()
init_time = time.time()
healthy = False
while time.time() - init_time < self.timeout_second:
if self._are_all_container_healthy(container_ids, client):
healthy = True
break
time.sleep(0.1)
if not healthy:
raise RuntimeError('Docker containers are not healthy')
@staticmethod
def _are_all_container_healthy(
container_ids: List[str], client: docker.client.DockerClient
) -> bool:
for id_ in container_ids:
status = client.containers.get(id_).attrs['State']['Health']['Status']
if status != DockerComposeServices.healthy_status:
return False
return True
def __exit__(self, exc_type, exc_val, exc_tb):
subprocess.run(
f'docker-compose -f {self.dump_path} down --remove-orphans'.split(' ')
)
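A usage sketch for the context manager above; the compose file path is hypothetical and its services are assumed to define Docker health checks, otherwise the health polling can never succeed:
def test_services_come_up():
    compose_file = os.path.join(cur_dir, 'docker-compose.yml')  # hypothetical file
    with DockerComposeServices(compose_file, timeout_second=60):
        # services are up and reported healthy here; run requests against them
        ...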
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply max pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode_mean_tokens=False,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=True,
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
|
"""
This example runs a BiLSTM after the word embedding lookup. The output of the BiLSTM is then pooled,
for example with max-pooling (which gives a system like InferSent) or with mean-pooling.
Note, you can also pass BERT embeddings to the BiLSTM.
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import os
import csv
import gzip
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Read the dataset
batch_size = 32
model_save_path = "output/training_stsbenchmark_bilstm-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
# Map tokens to traditional word embeddings like GloVe
word_embedding_model = models.WordEmbeddings.from_text_file("glove.6B.300d.txt.gz")
lstm = models.LSTM(word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(), hidden_dim=1024)
# Apply max pooling to get one fixed-sized sentence vector
pooling_model = models.Pooling(
lstm.get_word_embedding_dimension(),
pooling_mode_mean_tokens=False,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=True,
)
model = SentenceTransformer(modules=[word_embedding_model, lstm, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=batch_size)
train_loss = losses.CosineSimilarityLoss(model=model)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
num_epochs = 10
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
model.evaluate(test_evaluator)
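The module docstring above also mentions mean-pooling as an alternative to the InferSent-style max-pooling used here; a sketch of that variant only swaps the pooling flags:
pooling_model = models.Pooling(
    lstm.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=True,
    pooling_mode_cls_token=False,
    pooling_mode_max_tokens=False,
)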
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
# model settings
model = dict(
type='YOLOX',
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
# To align with the original implementation, the score threshold of the val
# phase is 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline,
dynamic_scale=img_scale)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=img_scale, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=15,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=300)
resume_from = None
interval = 10
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(14, 26),
img_scale=img_scale,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
log_config = dict(interval=50)
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
# model settings
model = dict(
type='YOLOX',
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
# To align with the original implementation, the score threshold of the val
# phase is 0.01, and the threshold of the test phase is 0.001.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (640, 640)
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline,
dynamic_scale=img_scale)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size=img_scale, pad_val=114.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=15,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=300)
resume_from = None
interval = 10
custom_hooks = [
dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48),
dict(
type='SyncRandomSizeHook',
ratio_range=(14, 26),
img_scale=img_scale,
interval=interval,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=15,
interval=interval,
priority=48),
dict(type='ExpMomentumEMAHook', resume_from=resume_from, priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(interval=interval, metric='bbox')
log_config = dict(interval=50)
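A sketch of how a config like this is typically loaded and inspected; it assumes an mmcv version below 2.0 (matching this config style) and a hypothetical file path:
from mmcv import Config

cfg = Config.fromfile('configs/yolox/yolox_s_8x8_300e_coco.py')  # hypothetical path
print(cfg.model.bbox_head.num_classes)  # 80
print(cfg.optimizer.lr)  # 0.01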
|
from docarray.predefined_document.image import Image
from docarray.predefined_document.mesh import Mesh3D
from docarray.predefined_document.point_cloud import PointCloud3D
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image', 'Mesh3D', 'PointCloud3D']
|
from docarray.predefined_document.image import Image
from docarray.predefined_document.text import Text
__all__ = ['Text', 'Image']
|
"""Standard LangChain interface tests."""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.unit_tests import ( # type: ignore[import-not-found]
ChatModelUnitTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelUnitTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if/else statement needs to be refactored; it is too long and
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls.construct(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
from docarray.proto import NodeProto
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Type, TypeVar
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import DocumentProto, NodeProto
try:
import torch # noqa: F401
except ImportError:
torch_imported = False
else:
from docarray.typing import TorchTensor
torch_imported = True
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray.typing import ( # TorchTensor,
ID,
AnyUrl,
Embedding,
ImageUrl,
NdArray,
TextUrl,
)
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
# this if/else statement needs to be refactored; it is too long and
# the check should be delegated to the type level
content_type_dict = dict(
ndarray=NdArray,
embedding=Embedding,
any_url=AnyUrl,
text_url=TextUrl,
image_url=ImageUrl,
id=ID,
)
if torch_imported:
content_type_dict['torch_tensor'] = TorchTensor
if content_type in content_type_dict:
fields[field] = content_type_dict[content_type].from_protobuf(
getattr(value, content_type)
)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
from docarray import DocumentArray
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
from docarray.proto import DocumentProto, NodeProto
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> 'NodeProto':
from docarray.proto import NodeProto
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
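A hedged round-trip sketch, assuming a concrete Document class built on this mixin (for example the predefined `Text` document seen earlier in this file) and assuming it exposes a `text` field:
from docarray.predefined_document import Text

doc = Text(text='hello world')  # field name is an assumption
pb = doc.to_protobuf()  # DocumentProto message
restored = Text.from_protobuf(pb)  # back to a Document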
|
# model settings
model = dict(
type='FasterRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
# model settings
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
type='FasterRCNN',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
|
"""Simple reader that reads OSMmap data from overpass API."""
import random
import string
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
warnings.filterwarnings("ignore")
class OpenMap(BaseReader):
"""
OpenMap Reader.
Get the map features from the Overpass API (OSM) for the given location/area.
Args:
localarea(str) - Area or location you are searching for
tag_values(list) - filter values for the given area
search_tag(str) - Tag that you are looking for
if you are not sure about the search_tag and tag_values, visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys to be removed from the response;
by default the following keys will be removed: ['nodes','geometry','members']
"""
def __init__(self) -> None:
"""Initialize with parameters."""
super().__init__()
@staticmethod
def _get_user() -> str:
# choose from all lowercase letter
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(10))
@staticmethod
def _get_latlon(locarea: str, user_agent: str) -> tuple:
try:
from geopy.geocoders import Nominatim
except ImportError:
raise ImportError("install geopy using `pip3 install geopy`")
geolocator = Nominatim(user_agent=user_agent)
location = geolocator.geocode(locarea)
return (location.latitude, location.longitude) if location else (None, None)
def load_data(
self,
localarea: str,
search_tag: Optional[str] = "amenity",
remove_keys: Optional[List] = ["nodes", "geometry", "members"],
tag_only: Optional[bool] = True,
tag_values: Optional[List] = [""],
local_area_buffer: Optional[int] = 2000,
) -> List[Document]:
"""
This loader will bring you all the node values from OpenStreetMap for the given location.
Args:
localarea(str) - Area or location you are searching for
search_tag(str) - Tag that you are looking for
if you are not sure about the search_tag and tag_values, visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys to be removed from the response;
by default the following keys will be removed: ['nodes','geometry','members']
tag_only(bool) - if True, returns only nodes that have tags; if False, returns all nodes
tag_values(list) - filter values for the given area
local_area_buffer(int) - range that you wish to cover (default 2000, i.e. 2 km)
"""
try:
from osmxtract import location, overpass
from osmxtract.errors import OverpassBadRequest
except ImportError:
raise ImportError("install osmxtract using `pip3 install osmxtract`")
null_list = ["", "null", "none", None]
extra_info = {}
local_area = localarea
if local_area.lower().strip() in null_list:
raise Exception("The Area should not be null")
user = self._get_user()
lat, lon = self._get_latlon(local_area, user)
try:
bounds = location.from_buffer(lat, lon, buffer_size=int(local_area_buffer))
except TypeError:
raise TypeError("Please give valid location name or check for spelling")
# overpass query generation and execution
tag_values = [str(i).lower().strip() for i in tag_values]
query = overpass.ql_query(
bounds, tag=search_tag.lower(), values=tag_values, timeout=500
)
extra_info["overpass_query"] = query
try:
response = overpass.request(query)
except OverpassBadRequest:
raise TypeError(
f"Error while executing the Query {query} please check the Args"
)
res = response["elements"]
_meta = response.copy()
del _meta["elements"]
extra_info["overpass_meta"] = str(_meta)
extra_info["lat"] = lat
extra_info["lon"] = lon
# filtering for only the tag values
filtered = [i for i in res if "tags" in i] if tag_only else res
for key in remove_keys:
[i.pop(key, None) for i in filtered]
if filtered:
return Document(text=str(filtered), extra_info=extra_info)
else:
return Document(text=str(res), extra_info=extra_info)
|
"""Simple reader that reads OSMmap data from overpass API."""
import random
import string
import warnings
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
warnings.filterwarnings("ignore")
class OpenMap(BaseReader):
"""OpenMap Reader.
Get the map features from the Overpass API (OSM) for the given location/area.
Args:
localarea(str) - Area or location you are searching for
tag_values(list) - filter values for the given area
search_tag(str) - Tag that you are looking for
if you are not sure about the search_tag and tag_values, visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys to be removed from the response;
by default the following keys will be removed: ['nodes','geometry','members']
"""
def __init__(self) -> None:
"""Initialize with parameters."""
super().__init__()
@staticmethod
def _get_user() -> str:
# choose from all lowercase letter
letters = string.ascii_lowercase
return "".join(random.choice(letters) for i in range(10))
@staticmethod
def _get_latlon(locarea: str, user_agent: str) -> tuple:
try:
from geopy.geocoders import Nominatim
except ImportError:
raise ImportError("install geopy using `pip3 install geopy`")
geolocator = Nominatim(user_agent=user_agent)
location = geolocator.geocode(locarea)
return (location.latitude, location.longitude) if location else (None, None)
def load_data(
self,
localarea: str,
search_tag: Optional[str] = "amenity",
remove_keys: Optional[List] = ["nodes", "geometry", "members"],
tag_only: Optional[bool] = True,
tag_values: Optional[List] = [""],
local_area_buffer: Optional[int] = 2000,
) -> List[Document]:
"""
This loader will bring you all the node values from OpenStreetMap for the given location.
Args:
localarea(str) - Area or location you are searching for
search_tag(str) - Tag that you are looking for
if you are not sure about the search_tag and tag_values, visit https://taginfo.openstreetmap.org/tags
remove_keys(list) - list of keys to be removed from the response;
by default the following keys will be removed: ['nodes','geometry','members']
tag_only(bool) - if True, returns only nodes that have tags; if False, returns all nodes
tag_values(list) - filter values for the given area
local_area_buffer(int) - range that you wish to cover (default 2000, i.e. 2 km)
"""
try:
from osmxtract import location, overpass
from osmxtract.errors import OverpassBadRequest
except ImportError:
raise ImportError("install osmxtract using `pip3 install osmxtract`")
null_list = ["", "null", "none", None]
extra_info = {}
local_area = localarea
if local_area.lower().strip() in null_list:
raise Exception("The Area should not be null")
user = self._get_user()
lat, lon = self._get_latlon(local_area, user)
try:
bounds = location.from_buffer(lat, lon, buffer_size=int(local_area_buffer))
except TypeError:
raise TypeError("Please give valid location name or check for spelling")
# overpass query generation and execution
tag_values = [str(i).lower().strip() for i in tag_values]
query = overpass.ql_query(
bounds, tag=search_tag.lower(), values=tag_values, timeout=500
)
extra_info["overpass_query"] = query
try:
response = overpass.request(query)
except OverpassBadRequest:
raise TypeError(
f"Error while executing the Query {query} please check the Args"
)
res = response["elements"]
_meta = response.copy()
del _meta["elements"]
extra_info["overpass_meta"] = str(_meta)
extra_info["lat"] = lat
extra_info["lon"] = lon
# filtering for only the tag values
filtered = [i for i in res if "tags" in i] if tag_only else res
for key in remove_keys:
[i.pop(key, None) for i in filtered]
if filtered:
return Document(text=str(filtered), extra_info=extra_info)
else:
return Document(text=str(res), extra_info=extra_info)
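A usage sketch for the reader above; the location and tag values are illustrative, and live network access to Nominatim and the Overpass API is required:
reader = OpenMap()
doc = reader.load_data(
    localarea="Berlin",  # illustrative location
    search_tag="amenity",
    tag_values=["cafe"],
    local_area_buffer=2000,  # 2 km around the geocoded point
)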
|
_base_ = './mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
|
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
It provides a simple search API that returns documents by the number of
counts the given query appears in the document.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast("str", item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
import operator
import uuid
from collections.abc import Sequence
from typing import Any, Optional, cast
from pydantic import Field
from langchain_core._api import beta
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.indexing import UpsertResponse
from langchain_core.indexing.base import DeleteResponse, DocumentIndex
@beta(message="Introduced in version 0.2.29. Underlying abstraction subject to change.")
class InMemoryDocumentIndex(DocumentIndex):
"""In memory document index.
This is an in-memory document index that stores documents in a dictionary.
    It provides a simple search API that ranks documents by the number of
    times the given query appears in them.
.. versionadded:: 0.2.29
"""
store: dict[str, Document] = Field(default_factory=dict)
top_k: int = 4
def upsert(self, items: Sequence[Document], /, **kwargs: Any) -> UpsertResponse:
"""Upsert items into the index."""
ok_ids = []
for item in items:
if item.id is None:
id_ = str(uuid.uuid4())
item_ = item.model_copy()
item_.id = id_
else:
item_ = item
id_ = item.id
self.store[id_] = item_
ok_ids.append(cast(str, item_.id))
return UpsertResponse(succeeded=ok_ids, failed=[])
def delete(self, ids: Optional[list[str]] = None, **kwargs: Any) -> DeleteResponse:
"""Delete by ID."""
if ids is None:
msg = "IDs must be provided for deletion"
raise ValueError(msg)
ok_ids = []
for id_ in ids:
if id_ in self.store:
del self.store[id_]
ok_ids.append(id_)
return DeleteResponse(
succeeded=ok_ids, num_deleted=len(ok_ids), num_failed=0, failed=[]
)
def get(self, ids: Sequence[str], /, **kwargs: Any) -> list[Document]:
"""Get by ids."""
found_documents = []
for id_ in ids:
if id_ in self.store:
found_documents.append(self.store[id_])
return found_documents
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
counts_by_doc = []
for document in self.store.values():
count = document.page_content.count(query)
counts_by_doc.append((document, count))
counts_by_doc.sort(key=operator.itemgetter(1), reverse=True)
return [doc.model_copy() for doc, count in counts_by_doc[: self.top_k]]
|
from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor, TorchTensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID', 'TorchTensor']
|
from docarray.typing.embedding import Embedding
from docarray.typing.id import ID
from docarray.typing.tensor import Tensor
from docarray.typing.url import AnyUrl, ImageUrl
__all__ = ['Tensor', 'Embedding', 'ImageUrl', 'AnyUrl', 'ID']
|
_base_ = 'faster-rcnn_r50-caffe_fpn_ms-1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[60000, 80000])
# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
|
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
< https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmengine.config.Config):
The origin config with "${key}" generated from a file.
Returns:
updated_cfg [mmengine.config.Config]:
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'for the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
                        f'cannot be dict, list, or tuple, ' \
                        f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmengine.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
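# A small, self-contained sketch of how replace_cfg_vals resolves '${key}' references;
# the config fields below are made up for illustration.
if __name__ == '__main__':
    cfg = Config(
        dict(data_root='data/coco/', ann_file='${data_root}annotations/train.json'))
    print(replace_cfg_vals(cfg).ann_file)  # -> data/coco/annotations/train.json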
|
# Copyright (c) OpenMMLab. All rights reserved.
import re
from mmengine.config import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
< https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmcv.utils.config.Config):
The origin config with "${key}" generated from a file.
Returns:
updated_cfg [mmcv.utils.config.Config]:
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'for the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
                        f'cannot be dict, list, or tuple, ' \
                        f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
|
from dataclasses import dataclass
from typing import Callable, Dict
import torch
import torchaudio
from ._vggish_impl import _SAMPLE_RATE, VGGish as _VGGish, VGGishInputProcessor as _VGGishInputProcessor
def _get_state_dict():
path = torchaudio.utils.download_asset("models/vggish.pt")
return torch.load(path)
@dataclass
class VGGishBundle:
"""VGGish :cite:`45611` inference pipeline ported from
`torchvggish <https://github.com/harritaylor/torchvggish>`__
and `tensorflow-models <https://github.com/tensorflow/models/tree/master/research/audioset>`__.
Example:
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import VGGISH
>>>
>>> input_sr = VGGISH.sample_rate
>>> input_proc = VGGISH.get_input_processor()
>>> model = VGGISH.get_model()
>>>
>>> waveform, sr = torchaudio.load(
>>> "Chopin_Ballade_-1_In_G_Minor,_Op._23.mp3",
>>> )
>>> waveform = waveform.squeeze(0)
>>> waveform = torchaudio.functional.resample(waveform, sr, input_sr)
>>> mono_output = model(input_proc(waveform))
"""
class VGGish(_VGGish):
__doc__ = _VGGish.__doc__
class VGGishInputProcessor(_VGGishInputProcessor):
__doc__ = _VGGishInputProcessor.__doc__
_state_dict_func: Callable[[], Dict]
@property
def sample_rate(self) -> int:
"""Sample rate of input waveform expected by input processor and model.
:type: int
"""
return _SAMPLE_RATE
def get_model(self) -> VGGish:
"""Constructs pre-trained VGGish model. Downloads and caches weights as necessary.
Returns:
VGGish: VGGish model with pre-trained weights loaded.
"""
model = self.VGGish()
state_dict = self._state_dict_func()
model.load_state_dict(state_dict)
model.eval()
return model
def get_input_processor(self) -> VGGishInputProcessor:
"""Constructs input processor for VGGish.
Returns:
VGGishInputProcessor: input processor for VGGish.
"""
return self.VGGishInputProcessor()
VGGISH = VGGishBundle(_get_state_dict)
VGGISH.__doc__ = """Pre-trained VGGish :cite:`45611` inference pipeline ported from
`torchvggish <https://github.com/harritaylor/torchvggish>`__
and `tensorflow-models <https://github.com/tensorflow/models/tree/master/research/audioset>`__.
Per the `documentation <https://github.com/tensorflow/models/tree/master/research/audioset/vggish>`__
for the original model, the model is "trained on a large YouTube dataset (a preliminary version of
what later became YouTube-8M)".
"""
|
from dataclasses import dataclass
from typing import Callable, Dict
import torch
import torchaudio
from ._vggish_impl import _SAMPLE_RATE, VGGish as _VGGish, VGGishInputProcessor as _VGGishInputProcessor
def _get_state_dict():
path = torchaudio.utils.download_asset("models/vggish.pt")
return torch.load(path)
@dataclass
class VGGishBundle:
"""VGGish :cite:`45611` inference pipeline ported from
`torchvggish <https://github.com/harritaylor/torchvggish>`__
and `tensorflow-models <https://github.com/tensorflow/models/tree/master/research/audioset>`__.
Example:
>>> import torchaudio
>>> from torchaudio.prototype.pipelines import VGGISH
>>>
>>> input_sr = VGGISH.sample_rate
>>> input_proc = VGGISH.get_input_processor()
>>> model = VGGISH.get_model()
>>>
>>> waveform, sr = torchaudio.load(
>>> "Chopin_Ballade_-1_In_G_Minor,_Op._23.mp3",
>>> )
>>> waveform = waveform.squeeze(0)
>>> waveform = torchaudio.functional.resample(waveform, sr, input_sr)
>>> mono_output = model(input_proc(waveform))
"""
class VGGish(_VGGish):
__doc__ = _VGGish.__doc__
class VGGishInputProcessor(_VGGishInputProcessor):
__doc__ = _VGGishInputProcessor.__doc__
_state_dict_func: Callable[[], Dict]
@property
def sample_rate(self) -> int:
"""Sample rate of input waveform expected by input processor and model.
:type: int
"""
return _SAMPLE_RATE
def get_model(self) -> VGGish:
"""Constructs pre-trained VGGish model. Downloads and caches weights as necessary.
Returns:
VGGish: VGGish model with pre-trained weights loaded.
"""
model = self.VGGish()
state_dict = self._state_dict_func()
model.load_state_dict(state_dict)
model.eval()
return model
def get_input_processor(self) -> VGGishInputProcessor:
"""Constructs input processor for VGGish.
Returns:
VGGishInputProcessor: input processor for VGGish.
"""
return self.VGGishInputProcessor()
VGGISH = VGGishBundle(_get_state_dict)
VGGISH.__doc__ = """Pre-trained VGGish :cite:`45611` inference pipeline ported from
`torchvggish <https://github.com/harritaylor/torchvggish>`__
and `tensorflow-models <https://github.com/tensorflow/models/tree/master/research/audioset>`__.
Per the `documentation <https://github.com/tensorflow/models/tree/master/research/audioset/vggish>`__
for the original model, the model is "trained on a large YouTube dataset (a preliminary version of
what later became YouTube-8M)".
"""
|
_base_ = ['../common/ms_3x_coco.py', '../_base_/models/faster-rcnn_r50_fpn.py']
|
_base_ = [
'../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
frozen_stages=0,
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
init_cfg=dict(
type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.preprocessing import image as image
from keras.preprocessing import sequence as sequence
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.preprocessing import image
from keras.api.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
import os
import jwt # noqa
import pytest
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
def test_class():
names_of_base_classes = [b.__name__ for b in DeepLakeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.fixture()
def vs_ids():
vs = DeepLakeVectorStore(dataset_path="mem://test", overwrite=True)
ids = vs.add(
nodes=[
Document(text="Doc 1", embedding=[1, 2, 1], metadata={"a": "1", "b": 10}),
Document(text="Doc 2", embedding=[1, 2, 2], metadata={"a": "2", "b": 11}),
Document(text="Doc 3", embedding=[1, 2, 3], metadata={"a": "3", "b": 12}),
]
)
yield (vs, ids)
vs.clear()
@pytest.mark.skipif(
os.getenv("GITHUB_ACTIONS") == "true", reason="tests are flaky on Github runners"
)
def test_filters(vs_ids):
vs, ids = vs_ids
nodes = vs.get_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in nodes] == ["Doc 1", "Doc 3"]
nodes = vs.get_nodes(node_ids=["a"])
assert len(nodes) == 0
nodes = vs.get_nodes(
filters=MetadataFilters(filters=[MetadataFilter(key="a", value="2")])
)
assert [x.text for x in nodes] == ["Doc 2"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
]
)
)
assert [x.text for x in nodes] == []
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=11, operator=FilterOperator.LTE),
]
)
)
assert [x.text for x in nodes] == ["Doc 1", "Doc 2"]
@pytest.mark.skipif(
os.getenv("GITHUB_ACTIONS") == "true", reason="tests are flaky on Github runners"
)
def test_delete_id(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in vs.get_nodes()] == ["Doc 2"]
@pytest.mark.skipif(
os.getenv("GITHUB_ACTIONS") == "true", reason="tests are flaky on Github runners"
)
def test_delete_filter(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in vs.get_nodes()] == ["Doc 1"]
|
import os
import jwt # noqa
import pytest
from llama_index.core import Document
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
FilterCondition,
FilterOperator,
MetadataFilter,
MetadataFilters,
)
from llama_index.vector_stores.deeplake import DeepLakeVectorStore
if os.getenv("GITHUB_ACTIONS") == "true":
pytest.skip("tests are flaky on Github runners", allow_module_level=True)
def test_class():
names_of_base_classes = [b.__name__ for b in DeepLakeVectorStore.__mro__]
assert BasePydanticVectorStore.__name__ in names_of_base_classes
@pytest.fixture()
def vs_ids():
vs = DeepLakeVectorStore(dataset_path="mem://test", overwrite=True)
ids = vs.add(
nodes=[
Document(text="Doc 1", embedding=[1, 2, 1], metadata={"a": "1", "b": 10}),
Document(text="Doc 2", embedding=[1, 2, 2], metadata={"a": "2", "b": 11}),
Document(text="Doc 3", embedding=[1, 2, 3], metadata={"a": "3", "b": 12}),
]
)
yield (vs, ids)
vs.clear()
def test_filters(vs_ids):
vs, ids = vs_ids
nodes = vs.get_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in nodes] == ["Doc 1", "Doc 3"]
nodes = vs.get_nodes(node_ids=["a"])
assert len(nodes) == 0
nodes = vs.get_nodes(
filters=MetadataFilters(filters=[MetadataFilter(key="a", value="2")])
)
assert [x.text for x in nodes] == ["Doc 2"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
]
)
)
assert [x.text for x in nodes] == []
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
condition=FilterCondition.OR,
filters=[
MetadataFilter(key="a", value="2"),
MetadataFilter(key="a", value="3"),
],
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in nodes] == ["Doc 2", "Doc 3"]
nodes = vs.get_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=11, operator=FilterOperator.LTE),
]
)
)
assert [x.text for x in nodes] == ["Doc 1", "Doc 2"]
def test_delete_id(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(node_ids=[ids[0], ids[2]])
assert [x.text for x in vs.get_nodes()] == ["Doc 2"]
def test_delete_filter(vs_ids):
vs, ids = vs_ids
vs.delete_nodes(
filters=MetadataFilters(
filters=[
MetadataFilter(key="b", value=10, operator=FilterOperator.GT),
]
)
)
assert [x.text for x in vs.get_nodes()] == ["Doc 1"]
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
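# A short usage sketch, assuming the ffmpeg CLI is available and "speech.wav" exists locally.
if __name__ == "__main__":
    audio = load_audio("speech.wav")  # mono float32 waveform resampled to 16 kHz
    audio = pad_or_trim(audio)  # pad or trim to exactly CHUNK_LENGTH seconds (480000 samples)
    mel = log_mel_spectrogram(audio)  # torch.Size([80, 3000]) log-Mel features
    print(mel.shape)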
|
import os
from functools import lru_cache
from typing import Union
import ffmpeg
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000: number of samples in a chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000: number of frames in a mel spectrogram input
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
# Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
out, _ = (
ffmpeg.input(file, threads=0)
.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sr)
.run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(dim=axis, index=torch.arange(length))
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
load the mel filterbank matrix for projecting STFT into a Mel spectrogram.
Allows decoupling librosa dependency; saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = N_MELS):
"""
    Compute the log-Mel spectrogram of the input audio.
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[:, :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET) -> None:
super().__init__(level)
def emit(self, record) -> None:
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s") -> None:
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
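# A minimal usage sketch, assuming the optional "coloredlogs" package is installed.
if __name__ == "__main__":
    logger = logging.getLogger(__name__)
    install_logger(logger, level=logging.DEBUG)
    logger.debug("debug message")
    logger.notice("notice message")  # custom level registered by install_logger
    logger.warning("warning message")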
|
import logging
import tqdm
class LoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
def install_logger(given_logger, level=logging.WARNING, fmt="%(levelname)s:%(name)s:%(message)s"):
"""Configures the given logger; format, logging level, style, etc"""
import coloredlogs
def add_notice_log_level():
"""Creates a new 'notice' logging level"""
# inspired by:
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
NOTICE_LEVEL_NUM = 25
logging.addLevelName(NOTICE_LEVEL_NUM, "NOTICE")
def notice(self, message, *args, **kws):
if self.isEnabledFor(NOTICE_LEVEL_NUM):
self._log(NOTICE_LEVEL_NUM, message, args, **kws)
logging.Logger.notice = notice
# Add an extra logging level above INFO and below WARNING
add_notice_log_level()
# More style info at:
# https://coloredlogs.readthedocs.io/en/latest/api.html
field_styles = coloredlogs.DEFAULT_FIELD_STYLES.copy()
field_styles["asctime"] = {}
level_styles = coloredlogs.DEFAULT_LEVEL_STYLES.copy()
level_styles["debug"] = {"color": "white", "faint": True}
level_styles["notice"] = {"color": "cyan", "bold": True}
coloredlogs.install(
logger=given_logger,
level=level,
use_chroot=False,
fmt=fmt,
level_styles=level_styles,
field_styles=field_styles,
)
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import gzip
import json
import os
import time
import torch
from sentence_transformers import SentenceTransformer, util
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
"""
This example demonstrates the setup for Question-Answer-Retrieval.
You can input a query or a question. The script then uses semantic search
to find relevant passages in Simple English Wikipedia (as it is smaller and fits better in RAM).
As model, we use: nq-distilbert-base-v1
It was trained on the Natural Questions dataset, a dataset with real questions from Google Search
together with annotated data from Wikipedia providing the answer. For the passages, we encode the
Wikipedia article title together with the individual text passages.
Google Colab Example: https://colab.research.google.com/drive/11GunvCqJuebfeTlgbJWkIMT0xJH6PWF1?usp=sharing
"""
import json
from sentence_transformers import SentenceTransformer, util
import time
import gzip
import os
import torch
# We use the Bi-Encoder to encode all passages, so that we can use it with semantic search
model_name = "nq-distilbert-base-v1"
bi_encoder = SentenceTransformer(model_name)
top_k = 5 # Number of passages we want to retrieve with the bi-encoder
# As dataset, we use Simple English Wikipedia. Compared to the full English wikipedia, it has only
# about 170k articles. We split these articles into paragraphs and encode them with the bi-encoder
wikipedia_filepath = "data/simplewiki-2020-11-01.jsonl.gz"
if not os.path.exists(wikipedia_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01.jsonl.gz", wikipedia_filepath)
passages = []
with gzip.open(wikipedia_filepath, "rt", encoding="utf8") as fIn:
for line in fIn:
data = json.loads(line.strip())
for paragraph in data["paragraphs"]:
# We encode the passages as [title, text]
passages.append([data["title"], paragraph])
# If you like, you can also limit the number of passages you want to use
print("Passages:", len(passages))
# To speed things up, pre-computed embeddings are downloaded.
# The provided file encoded the passages with the model 'nq-distilbert-base-v1'
if model_name == "nq-distilbert-base-v1":
embeddings_filepath = "simplewiki-2020-11-01-nq-distilbert-base-v1.pt"
if not os.path.exists(embeddings_filepath):
util.http_get("http://sbert.net/datasets/simplewiki-2020-11-01-nq-distilbert-base-v1.pt", embeddings_filepath)
corpus_embeddings = torch.load(embeddings_filepath)
corpus_embeddings = corpus_embeddings.float() # Convert embedding file to float
device = util.get_device_name()
corpus_embeddings = corpus_embeddings.to(device)
else: # Here, we compute the corpus_embeddings from scratch (which can take a while depending on the GPU)
corpus_embeddings = bi_encoder.encode(passages, convert_to_tensor=True, show_progress_bar=True)
while True:
query = input("Please enter a question: ")
# Encode the query using the bi-encoder and find potentially relevant passages
start_time = time.time()
question_embedding = bi_encoder.encode(query, convert_to_tensor=True)
hits = util.semantic_search(question_embedding, corpus_embeddings, top_k=top_k)
hits = hits[0] # Get the hits for the first query
end_time = time.time()
# Output of top-k hits
print("Input question:", query)
print("Results (after {:.3f} seconds):".format(end_time - start_time))
for hit in hits:
print("\t{:.3f}\t{}".format(hit["score"], passages[hit["corpus_id"]]))
print("\n\n========\n")
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
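# A small sketch of how the parser handles a message carrying a function_call;
# the tool name and arguments below are made up for illustration.
if __name__ == "__main__":
    message = AIMessage(
        content="",
        additional_kwargs={
            "function_call": {"name": "get_weather", "arguments": '{"city": "Berlin"}'}
        },
    )
    action = OpenAIFunctionsAgentOutputParser._parse_ai_message(message)
    print(action.tool, action.tool_input)  # get_weather {'city': 'Berlin'}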
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import AgentOutputParser
class OpenAIFunctionsAgentOutputParser(AgentOutputParser):
"""Parses a message into agent action/finish.
Is meant to be used with OpenAI models, as it relies on the specific
function_call parameter from OpenAI to convey what tools to use.
If a function_call parameter is passed, then that is used to get
the tool and tool input.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-functions-agent"
@staticmethod
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
"""Parse an AI message."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
function_call = message.additional_kwargs.get("function_call", {})
if function_call:
function_name = function_call["name"]
try:
if len(function_call["arguments"].strip()) == 0:
# OpenAI returns an empty string for functions containing no args
_tool_input = {}
else:
# otherwise it returns a json object
_tool_input = json.loads(function_call["arguments"], strict=False)
except JSONDecodeError:
msg = (
f"Could not parse tool input: {function_call} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg)
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
if "__arg1" in _tool_input:
tool_input = _tool_input["__arg1"]
else:
tool_input = _tool_input
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
return AgentActionMessageLog(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
)
return AgentFinish(
return_values={"output": message.content}, log=str(message.content)
)
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[AgentAction, AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return self._parse_ai_message(message)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.s3", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
|
import numpy as np
import pytest
from pydantic import parse_obj_as
from docarray.computation.numpy_backend import NumpyCompBackend
from docarray.typing import NdArray
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, np.int, np.float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
def test_stack():
t0 = parse_obj_as(NdArray, np.zeros((3, 224, 224)))
t1 = parse_obj_as(NdArray, np.ones((3, 224, 224)))
stacked1 = NumpyCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, np.ndarray)
assert stacked1.shape == (2, 3, 224, 224)
stacked2 = NumpyCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, np.ndarray)
assert stacked2.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from docarray.computation.numpy_backend import NumpyCompBackend
def test_to_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.to_device(np.random.rand(10, 3), 'meta')
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((5)), 1),
(np.zeros((1, 5)), 2),
(np.zeros((5, 5)), 2),
(np.zeros(()), 0),
],
)
def test_n_dim(array, result):
assert NumpyCompBackend.n_dim(array) == result
@pytest.mark.parametrize(
'array,result',
[
(np.zeros((10,)), (10,)),
(np.zeros((5, 5)), (5, 5)),
(np.zeros(()), ()),
],
)
def test_shape(array, result):
shape = NumpyCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
def test_device():
array = np.array([1, 2, 3])
assert NumpyCompBackend.device(array) is None
@pytest.mark.parametrize('dtype', [np.int64, np.float64, np.int, np.float])
def test_dtype(dtype):
array = np.array([1, 2, 3], dtype=dtype)
assert NumpyCompBackend.dtype(array) == dtype
def test_empty():
array = NumpyCompBackend.empty((10, 3))
assert array.shape == (10, 3)
def test_empty_dtype():
tensor = NumpyCompBackend.empty((10, 3), dtype=np.int32)
assert tensor.shape == (10, 3)
assert tensor.dtype == np.int32
def test_empty_device():
with pytest.raises(NotImplementedError):
NumpyCompBackend.empty((10, 3), device='meta')
def test_squeeze():
tensor = np.zeros(shape=(1, 1, 3, 1))
squeezed = NumpyCompBackend.squeeze(tensor)
assert squeezed.shape == (3,)
@pytest.mark.parametrize(
'array,t_range,x_range,result',
[
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), None, np.array([0, 2, 4, 6, 8, 10])),
(np.array([0, 1, 2, 3, 4, 5]), (0, 10), (0, 10), np.array([0, 1, 2, 3, 4, 5])),
(
np.array([[0.0, 1.0], [0.0, 1.0]]),
(0, 10),
None,
np.array([[0.0, 10.0], [0.0, 10.0]]),
),
],
)
def test_minmax_normalize(array, t_range, x_range, result):
output = NumpyCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output, result)
|
"""Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""
String Iterable Reader.
Gets a list of documents, given an iterable (e.g. list) of strings.
Example:
.. code-block:: python
from llama_index.core.legacy import StringIterableReader, TreeIndex
documents = StringIterableReader().load_data(
texts=["I went to the store", "I bought an apple"]
)
index = TreeIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine.query("what did I buy?")
# response should be something like "You bought an apple."
"""
is_remote: bool = False
@classmethod
def class_name(cls) -> str:
return "StringIterableReader"
def load_data(self, texts: List[str]) -> List[Document]:
"""Load the data."""
results = []
for text in texts:
results.append(Document(text=text))
return results
|
"""Simple reader that turns an iterable of strings into a list of Documents."""
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class StringIterableReader(BasePydanticReader):
"""String Iterable Reader.
Gets a list of documents, given an iterable (e.g. list) of strings.
Example:
.. code-block:: python
from llama_index.core.legacy import StringIterableReader, TreeIndex
documents = StringIterableReader().load_data(
texts=["I went to the store", "I bought an apple"]
)
index = TreeIndex.from_documents(documents)
query_engine = index.as_query_engine()
query_engine.query("what did I buy?")
# response should be something like "You bought an apple."
"""
is_remote: bool = False
@classmethod
def class_name(cls) -> str:
return "StringIterableReader"
def load_data(self, texts: List[str]) -> List[Document]:
"""Load the data."""
results = []
for text in texts:
results.append(Document(text=text))
return results
|
from typing import Union
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.torch_tensor import TorchTensor
Tensor = Union[NdArray, TorchTensor]
|
from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic.fields import ModelField
from pydantic import BaseConfig
from docarray.proto import NdArrayProto, NodeProto
T = TypeVar('T', bound='Tensor')
class Tensor(np.ndarray, AbstractType):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
if isinstance(value, np.ndarray):
return cls.from_ndarray(value)
elif isinstance(value, Tensor):
return cast(T, value)
elif isinstance(value, list) or isinstance(value, tuple):
try:
arr_from_list: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr_from_list)
except Exception:
pass # handled below
else:
try:
                arr: np.ndarray = np.asarray(value)
return cls.from_ndarray(arr)
except Exception:
pass # handled below
raise ValueError(f'Expected a numpy.ndarray compatible type, got {type(value)}')
@classmethod
def from_ndarray(cls: Type[T], value: np.ndarray) -> T:
return value.view(cls)
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
# this is needed to dump to json
field_schema.update(type='string', format='tensor')
def _to_json_compatible(self) -> np.ndarray:
"""
Convert tensor into a json compatible object
:return: a list representation of the tensor
"""
return self.unwrap()
def unwrap(self) -> np.ndarray:
"""
Return the original ndarray without any memory copy.
        The original view remains intact and is still a Document Tensor,
        but the returned object is a pure np.ndarray; both objects share
        the same underlying memory.
EXAMPLE USAGE
.. code-block:: python
from docarray.typing import Tensor
import numpy as np
t1 = Tensor.validate(np.zeros((3, 224, 224)), None, None)
            # here t1 is a docarray Tensor
            t2 = t1.unwrap()
            # here t2 is a pure np.ndarray but t1 is still a docarray Tensor
# But both share the same underlying memory
:return: a numpy ndarray
"""
return self.view(np.ndarray)
def _to_node_protobuf(self: T, field: str = 'tensor') -> NodeProto:
"""Convert itself into a NodeProto protobuf message. This function should
        be called when the Document is nested into another Document that needs to be
        converted into a protobuf.
:param field: field in which to store the content in the node proto
:return: the nested item protobuf message
"""
nd_proto = NdArrayProto()
self._flush_tensor_to_proto(nd_proto, value=self)
return NodeProto(**{field: nd_proto})
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T':
"""
read ndarray from a proto msg
:param pb_msg:
:return: a numpy array
"""
source = pb_msg.dense
if source.buffer:
x = np.frombuffer(source.buffer, dtype=source.dtype)
return cls.from_ndarray(x.reshape(source.shape))
elif len(source.shape) > 0:
return cls.from_ndarray(np.zeros(source.shape))
else:
raise ValueError(f'proto message {pb_msg} cannot be cast to a Tensor')
@staticmethod
def _flush_tensor_to_proto(pb_msg: 'NdArrayProto', value: 'Tensor'):
pb_msg.dense.buffer = value.tobytes()
pb_msg.dense.ClearField('shape')
pb_msg.dense.shape.extend(list(value.shape))
pb_msg.dense.dtype = value.dtype.str
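# Minimal round-trip sketch (illustrative only): assumes the generated protobuf
# bindings in `docarray.proto` (NdArrayProto) are importable at runtime.
if __name__ == '__main__':
    from docarray.proto import NdArrayProto
    t1 = Tensor.validate(np.arange(6).reshape(2, 3), None, None)
    proto = NdArrayProto()
    Tensor._flush_tensor_to_proto(proto, value=t1)  # stores buffer, shape and dtype
    t2 = Tensor.from_protobuf(proto)  # rebuilds the array from the raw buffer
    assert np.allclose(t1.unwrap(), t2.unwrap())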
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
action='store_true',
help='resume from the latest checkpoint in the work_dir automatically')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
cfg.resume = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
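# Example invocations (the config path below is a placeholder, not shipped with
# this snippet):
#   python tools/train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py
#   python tools/train.py configs/faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py \
#       --amp --auto-scale-lr --resume --work-dir work_dirs/my_run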
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# register all modules in mmdet into the registries
# do not init the default scope here because it will be init in the runner
register_all_modules(init_default_scope=False)
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
|
from keras._tf_keras import keras
|
from keras.api._tf_keras import keras
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
@pytest.mark.internet
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from the prefix (LMDB and Memcache are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/segmentation/VOCdevkit/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/segmentation/',
# 'data/': 's3://openmmlab/datasets/segmentation/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset will add a different `dataset_type` to dataset.metainfo,
            # which causes an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as the default evaluation mode, while PASCAL
# VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1000, 600), keep_ratio=True),
# avoid bboxes being resized
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type='ConcatDataset',
            # VOCDataset will add a different `dataset_type` to dataset.metainfo,
            # which causes an error when using ConcatDataset. Adding
            # `ignore_keys` avoids this error.
ignore_keys=['dataset_type'],
datasets=[
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline),
dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2012/ImageSets/Main/trainval.txt',
data_prefix=dict(sub_data_root='VOC2012/'),
filter_cfg=dict(
filter_empty_gt=True, min_size=32, bbox_min_size=32),
pipeline=train_pipeline)
])))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='VOC2007/ImageSets/Main/test.txt',
data_prefix=dict(sub_data_root='VOC2007/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
# Pascal VOC2007 uses `11points` as the default evaluation mode, while PASCAL
# VOC2012 defaults to 'area'.
val_evaluator = dict(type='VOCMetric', metric='mAP', eval_mode='11points')
test_evaluator = val_evaluator
|
# type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit,
)
from langchain_cli.namespaces.migrate.generate.partner import (
get_migrations_for_partner_package,
)
@click.group()
def cli():
"""Migration scripts management."""
pass
@cli.command()
@click.option(
"--pkg1",
default="langchain",
)
@click.option(
"--pkg2",
default="langchain_community",
)
@click.option(
"--output",
default=None,
help="Output file for the migration script.",
)
@click.option(
"--filter-by-all/--no-filter-by-all",
default=True,
help="Output file for the migration script.",
)
@click.option(
"--format",
type=click.Choice(["json", "grit"], case_sensitive=False),
default="json",
help="The output format for the migration script (json or grit).",
)
def generic(
pkg1: str, pkg2: str, output: str, filter_by_all: bool, format: str
) -> None:
"""Generate a migration script."""
click.echo("Migration script generated.")
migrations = generate_simplified_migrations(pkg1, pkg2, filter_by_all=filter_by_all)
if output is not None:
name = output.removesuffix(".json").removesuffix(".grit")
else:
name = f"{pkg1}_to_{pkg2}"
if output is None:
output = f"{name}.json" if format == "json" else f"{name}.grit"
if format == "json":
dumped = json.dumps(migrations, indent=2, sort_keys=True)
else:
dumped = dump_migrations_as_grit(name, migrations)
with open(output, "w") as f:
f.write(dumped)
def handle_partner(pkg: str, output: str = None):
migrations = get_migrations_for_partner_package(pkg)
# Run with python 3.9+
name = pkg.removeprefix("langchain_")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit" if output is None else output
if migrations:
with open(output_name, "w") as f:
f.write(data)
click.secho(f"LangChain migration script saved to {output_name}")
else:
click.secho(f"No migrations found for {pkg}", fg="yellow")
@cli.command()
@click.argument("pkg")
@click.option("--output", default=None, help="Output file for the migration script.")
def partner(pkg: str, output: str) -> None:
"""Generate migration scripts specifically for LangChain modules."""
click.echo("Migration script for LangChain generated.")
handle_partner(pkg, output)
@cli.command()
@click.argument("json_file")
def json_to_grit(json_file: str) -> None:
"""Generate a Grit migration from an old JSON migration file."""
with open(json_file) as f:
migrations = json.load(f)
name = os.path.basename(json_file).removesuffix(".json").removesuffix(".grit")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit"
with open(output_name, "w") as f:
f.write(data)
click.secho(f"GritQL migration script saved to {output_name}")
@cli.command()
def all_installed_partner_pkgs() -> None:
"""Generate migration scripts for all LangChain modules."""
# Will generate migrations for all partner packages.
# Define as "langchain_<partner_name>".
# First let's determine which packages are installed in the environment
# and then generate migrations for them.
langchain_pkgs = [
name
for _, name, _ in pkgutil.iter_modules()
if name.startswith("langchain_")
and name not in {"langchain_core", "langchain_cli", "langchain_community"}
]
for pkg in langchain_pkgs:
handle_partner(pkg)
if __name__ == "__main__":
cli()
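# Example invocations (script and package names are placeholders):
#   python generate_migrations.py generic --pkg1 langchain --pkg2 langchain_community --format grit
#   python generate_migrations.py partner langchain_openai --output openai.grit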
|
# type: ignore
"""Script to generate migrations for the migration script."""
import json
import os
import pkgutil
import click
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
from langchain_cli.namespaces.migrate.generate.grit import (
dump_migrations_as_grit,
)
from langchain_cli.namespaces.migrate.generate.partner import (
get_migrations_for_partner_package,
)
@click.group()
def cli():
"""Migration scripts management."""
pass
@cli.command()
@click.option(
"--pkg1",
default="langchain",
)
@click.option(
"--pkg2",
default="langchain_community",
)
@click.option(
"--output",
default=None,
help="Output file for the migration script.",
)
@click.option(
"--filter-by-all/--no-filter-by-all",
default=True,
help="Output file for the migration script.",
)
@click.option(
"--format",
type=click.Choice(["json", "grit"], case_sensitive=False),
default="json",
help="The output format for the migration script (json or grit).",
)
def generic(
pkg1: str, pkg2: str, output: str, filter_by_all: bool, format: str
) -> None:
"""Generate a migration script."""
click.echo("Migration script generated.")
migrations = generate_simplified_migrations(pkg1, pkg2, filter_by_all=filter_by_all)
if output is not None:
name = output.removesuffix(".json").removesuffix(".grit")
else:
name = f"{pkg1}_to_{pkg2}"
if output is None:
output = f"{name}.json" if format == "json" else f"{name}.grit"
if format == "json":
dumped = json.dumps(migrations, indent=2, sort_keys=True)
else:
dumped = dump_migrations_as_grit(name, migrations)
with open(output, "w") as f:
f.write(dumped)
def handle_partner(pkg: str, output: str = None):
migrations = get_migrations_for_partner_package(pkg)
# Run with python 3.9+
name = pkg.removeprefix("langchain_")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit" if output is None else output
if migrations:
with open(output_name, "w") as f:
f.write(data)
click.secho(f"LangChain migration script saved to {output_name}")
else:
click.secho(f"No migrations found for {pkg}", fg="yellow")
@cli.command()
@click.argument("pkg")
@click.option("--output", default=None, help="Output file for the migration script.")
def partner(pkg: str, output: str) -> None:
"""Generate migration scripts specifically for LangChain modules."""
click.echo("Migration script for LangChain generated.")
handle_partner(pkg, output)
@cli.command()
@click.argument("json_file")
def json_to_grit(json_file: str) -> None:
"""Generate a Grit migration from an old JSON migration file."""
with open(json_file, "r") as f:
migrations = json.load(f)
name = os.path.basename(json_file).removesuffix(".json").removesuffix(".grit")
data = dump_migrations_as_grit(name, migrations)
output_name = f"{name}.grit"
with open(output_name, "w") as f:
f.write(data)
click.secho(f"GritQL migration script saved to {output_name}")
@cli.command()
def all_installed_partner_pkgs() -> None:
"""Generate migration scripts for all LangChain modules."""
# Will generate migrations for all partner packages.
# Define as "langchain_<partner_name>".
# First let's determine which packages are installed in the environment
# and then generate migrations for them.
langchain_pkgs = [
name
for _, name, _ in pkgutil.iter_modules()
if name.startswith("langchain_")
and name not in {"langchain_core", "langchain_cli", "langchain_community"}
]
for pkg in langchain_pkgs:
handle_partner(pkg)
if __name__ == "__main__":
cli()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import StableCascadeUNet
from diffusers.utils import logging
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
logger = logging.get_logger(__name__)
enable_full_determinism()
@slow
@require_torch_accelerator
class StableCascadeUNetSingleFileTest(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components_stage_b(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade", variant="bf16", subfolder="decoder", use_safetensors=True
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_components_stage_b_lite(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade", variant="bf16", subfolder="decoder_lite"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_components_stage_c(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
def test_single_file_components_stage_c_lite(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_lite_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior_lite"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert model.config[param_name] == param_value, (
f"{param_name} differs between single file loading and pretrained loading"
)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import StableCascadeUNet
from diffusers.utils import logging
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
require_torch_accelerator,
slow,
torch_device,
)
logger = logging.get_logger(__name__)
enable_full_determinism()
@slow
@require_torch_accelerator
class StableCascadeUNetSingleFileTest(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_components_stage_b(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade", variant="bf16", subfolder="decoder", use_safetensors=True
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_components_stage_b_lite(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade", variant="bf16", subfolder="decoder_lite"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_components_stage_c(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
def test_single_file_components_stage_c_lite(self):
model_single_file = StableCascadeUNet.from_single_file(
"https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_lite_bf16.safetensors",
torch_dtype=torch.bfloat16,
)
model = StableCascadeUNet.from_pretrained(
"stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior_lite"
)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between single file loading and pretrained loading"
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
assert old_verbosity == 1
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config() -> None:
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
def test_thread_safety():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
def test_nthread() -> None:
config = xgb.get_config()
assert config["nthread"] == 0
|
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
import pytest
import xgboost as xgb
@pytest.mark.parametrize("verbosity_level", [0, 1, 2, 3])
def test_global_config_verbosity(verbosity_level):
def get_current_verbosity():
return xgb.get_config()["verbosity"]
old_verbosity = get_current_verbosity()
assert old_verbosity == 1
with xgb.config_context(verbosity=verbosity_level):
new_verbosity = get_current_verbosity()
assert new_verbosity == verbosity_level
assert old_verbosity == get_current_verbosity()
@pytest.mark.parametrize("use_rmm", [False, True])
def test_global_config_use_rmm(use_rmm):
def get_current_use_rmm_flag():
return xgb.get_config()["use_rmm"]
old_use_rmm_flag = get_current_use_rmm_flag()
with xgb.config_context(use_rmm=use_rmm):
new_use_rmm_flag = get_current_use_rmm_flag()
assert new_use_rmm_flag == use_rmm
assert old_use_rmm_flag == get_current_use_rmm_flag()
def test_nested_config() -> None:
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=2):
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=1):
assert xgb.get_config()["verbosity"] == 1
assert xgb.get_config()["verbosity"] == 2
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
with xgb.config_context(verbosity=None):
assert xgb.get_config()["verbosity"] == 3 # None has no effect
xgb.set_config(verbosity=2)
assert xgb.get_config()["verbosity"] == 2
with xgb.config_context(verbosity=3):
assert xgb.get_config()["verbosity"] == 3
xgb.set_config(verbosity=verbosity) # reset
verbosity = xgb.get_config()["verbosity"]
assert verbosity == 1
def test_thread_safety():
n_threads = multiprocessing.cpu_count()
futures = []
with ThreadPoolExecutor(max_workers=n_threads) as executor:
for i in range(256):
f = executor.submit(test_nested_config)
futures.append(f)
for f in futures:
f.result()
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Ticker News API"""
query: str
class PolygonTickerNews(BaseTool):
"""Tool that gets the latest news for a given ticker from Polygon"""
mode: str = "get_ticker_news"
name: str = "polygon_ticker_news"
description: str = (
"A wrapper around Polygon's Ticker News API. "
"This tool is useful for fetching the latest news for a stock. "
"Input should be the ticker that you want to get the latest news for."
)
args_schema: Type[BaseModel] = Inputs
api_wrapper: PolygonAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Polygon API tool."""
return self.api_wrapper.run(self.mode, ticker=query)
|
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
"""Inputs for Polygon's Ticker News API"""
query: str
class PolygonTickerNews(BaseTool): # type: ignore[override, override]
"""Tool that gets the latest news for a given ticker from Polygon"""
mode: str = "get_ticker_news"
name: str = "polygon_ticker_news"
description: str = (
"A wrapper around Polygon's Ticker News API. "
"This tool is useful for fetching the latest news for a stock. "
"Input should be the ticker that you want to get the latest news for."
)
args_schema: Type[BaseModel] = Inputs
api_wrapper: PolygonAPIWrapper
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Polygon API tool."""
return self.api_wrapper.run(self.mode, ticker=query)
|
"""Chroma Reader."""
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ChromaReader(BaseReader):
"""
Chroma reader.
Retrieve documents from existing persisted Chroma collections.
Args:
collection_name: Name of the persisted collection.
persist_directory: Directory where the collection is persisted.
"""
def __init__(
self,
collection_name: str,
persist_directory: Optional[str] = None,
chroma_api_impl: str = "rest",
chroma_db_impl: Optional[str] = None,
host: str = "localhost",
port: int = 8000,
) -> None:
"""Initialize with parameters."""
import_err_msg = (
"`chromadb` package not found, please run `pip install chromadb`"
)
try:
import chromadb
except ImportError:
raise ImportError(import_err_msg)
if collection_name is None:
raise ValueError("Please provide a collection name.")
# from chromadb.config import Settings
if persist_directory is not None:
self._client = chromadb.PersistentClient(
path=persist_directory if persist_directory else "./chroma",
)
elif (host is not None) or (port is not None):
self._client = chromadb.HttpClient(
host=host,
port=port,
)
self._collection = self._client.get_collection(collection_name)
def create_documents(self, results: Any) -> List[Document]:
"""
Create documents from the results.
Args:
results: Results from the query.
Returns:
List of documents.
"""
documents = []
for result in zip(
results["ids"][0],
results["documents"][0],
results["embeddings"][0],
results["metadatas"][0],
):
document = Document(
id_=result[0],
text=result[1],
embedding=result[2],
metadata=result[3],
)
documents.append(document)
return documents
def load_data(
self,
query_embedding: Optional[List[float]] = None,
limit: int = 10,
where: Optional[dict] = None,
where_document: Optional[dict] = None,
query: Optional[Union[str, List[str]]] = None,
) -> Any:
"""
Load data from the collection.
Args:
limit: Number of results to return.
where: Filter results by metadata. {"metadata_field": "is_equal_to_this"}
where_document: Filter results by document. {"$contains":"search_string"}
Returns:
List of documents.
"""
where = where or {}
where_document = where_document or {}
if query_embedding is not None:
results = self._collection.search(
query_embedding=query_embedding,
n_results=limit,
where=where,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
return self.create_documents(results)
elif query is not None:
query = query if isinstance(query, list) else [query]
results = self._collection.query(
query_texts=query,
n_results=limit,
where=where,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
return self.create_documents(results)
else:
raise ValueError("Please provide either query embedding or query.")
|
"""Chroma Reader."""
from typing import Any, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ChromaReader(BaseReader):
"""Chroma reader.
Retrieve documents from existing persisted Chroma collections.
Args:
collection_name: Name of the persisted collection.
persist_directory: Directory where the collection is persisted.
"""
def __init__(
self,
collection_name: str,
persist_directory: Optional[str] = None,
chroma_api_impl: str = "rest",
chroma_db_impl: Optional[str] = None,
host: str = "localhost",
port: int = 8000,
) -> None:
"""Initialize with parameters."""
import_err_msg = (
"`chromadb` package not found, please run `pip install chromadb`"
)
try:
import chromadb
except ImportError:
raise ImportError(import_err_msg)
if collection_name is None:
raise ValueError("Please provide a collection name.")
# from chromadb.config import Settings
if persist_directory is not None:
self._client = chromadb.PersistentClient(
path=persist_directory if persist_directory else "./chroma",
)
elif (host is not None) or (port is not None):
self._client = chromadb.HttpClient(
host=host,
port=port,
)
self._collection = self._client.get_collection(collection_name)
def create_documents(self, results: Any) -> List[Document]:
"""Create documents from the results.
Args:
results: Results from the query.
Returns:
List of documents.
"""
documents = []
for result in zip(
results["ids"][0],
results["documents"][0],
results["embeddings"][0],
results["metadatas"][0],
):
document = Document(
id_=result[0],
text=result[1],
embedding=result[2],
metadata=result[3],
)
documents.append(document)
return documents
def load_data(
self,
query_embedding: Optional[List[float]] = None,
limit: int = 10,
where: Optional[dict] = None,
where_document: Optional[dict] = None,
query: Optional[Union[str, List[str]]] = None,
) -> Any:
"""Load data from the collection.
Args:
limit: Number of results to return.
where: Filter results by metadata. {"metadata_field": "is_equal_to_this"}
where_document: Filter results by document. {"$contains":"search_string"}
Returns:
List of documents.
"""
where = where or {}
where_document = where_document or {}
if query_embedding is not None:
results = self._collection.search(
query_embedding=query_embedding,
n_results=limit,
where=where,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
return self.create_documents(results)
elif query is not None:
query = query if isinstance(query, list) else [query]
results = self._collection.query(
query_texts=query,
n_results=limit,
where=where,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
return self.create_documents(results)
else:
raise ValueError("Please provide either query embedding or query.")
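# Usage sketch (illustrative): "my_docs" and "./chroma" are placeholders for an
# existing persisted collection and are not part of the reader's documented API.
if __name__ == "__main__":
    reader = ChromaReader(collection_name="my_docs", persist_directory="./chroma")
    docs = reader.load_data(query="What did I buy?", limit=5)
    print(f"Loaded {len(docs)} documents")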
|
from .pdf_segmenter import PDFSegmenter
|
from .pdf_segmenter import PDFSegmenter
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[AudioBytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
---
```python
from typing import Optional
import torch
from docarray import BaseDoc
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDoc):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.zeros(1000, 2),
)
doc_1.audio_tensor.save(file_path='/tmp/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor, _ = doc_2.url.load()
doc_2.audio_tensor.save(file_path='/tmp/file_2.wav')
doc_2.bytes_ = doc_1.audio_tensor.to_bytes()
```
---
"""
...
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.3.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.19.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
"""Tool for the Google search API."""
from typing import Optional, Type
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
class GooglePlacesSchema(BaseModel):
"""Input for GooglePlacesTool."""
query: str = Field(..., description="Query for google maps")
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GooglePlacesTool",
)
class GooglePlacesTool(BaseTool):
"""Tool that queries the Google places API."""
name: str = "google_places"
description: str = (
"A wrapper around Google Places. "
"Useful for when you need to validate or "
"discover addressed from ambiguous text. "
"Input should be a search query."
)
api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)
args_schema: Type[BaseModel] = GooglePlacesSchema
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Google search API."""
from typing import Optional, Type
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
class GooglePlacesSchema(BaseModel):
"""Input for GooglePlacesTool."""
query: str = Field(..., description="Query for google maps")
@deprecated(
since="0.0.33",
removal="1.0",
alternative_import="langchain_google_community.GooglePlacesTool",
)
class GooglePlacesTool(BaseTool): # type: ignore[override, override]
"""Tool that queries the Google places API."""
name: str = "google_places"
description: str = (
"A wrapper around Google Places. "
"Useful for when you need to validate or "
"discover addressed from ambiguous text. "
"Input should be a search query."
)
api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper) # type: ignore[arg-type]
args_schema: Type[BaseModel] = GooglePlacesSchema
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
return self.api_wrapper.run(query)
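# Usage sketch (illustrative): GooglePlacesAPIWrapper requires the `googlemaps`
# package and a GPLACES_API_KEY environment variable; the query is a placeholder.
#   tool = GooglePlacesTool()
#   print(tool.run("Pizza near Times Square"))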
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
audio = Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
class MyAudio(Audio):
name: Optional[Text]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
audio.name = Text(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import AudioDoc, TextDoc
# compose it
class MultiModalDoc(Document):
audio: Audio
text: Text
mmdoc = MultiModalDoc(
audio=Audio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes_ = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AudioUrl
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='AudioDoc')
class AudioDoc(BaseDocument):
"""
Document for handling audios.
The Audio Document can contain an AudioUrl (`AudioDoc.url`), an AudioTensor
(`AudioDoc.tensor`), and an AnyEmbedding (`AudioDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import AudioDoc
# use it directly
        audio = AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import AudioDoc, TextDoc
from typing import Optional
# extend it
        class MyAudio(AudioDoc):
            name: Optional[TextDoc]
audio = MyAudio(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
)
audio.tensor, audio.frame_rate = audio.url.load()
model = MyEmbeddingModel()
audio.embedding = model(audio.tensor)
        audio.name = TextDoc(text='my first audio')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import AudioDoc, TextDoc
# compose it
        class MultiModalDoc(BaseDocument):
            audio: AudioDoc
            text: TextDoc
mmdoc = MultiModalDoc(
            audio=AudioDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/hello.wav?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.url.load()
# equivalent to
mmdoc.audio.bytes = mmdoc.audio.url.load_bytes()
mmdoc.audio.tensor, mmdoc.audio.frame_rate = mmdoc.audio.bytes.load()
"""
url: Optional[AudioUrl]
tensor: Optional[AudioTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[AudioBytes]
frame_rate: Optional[int]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
    root_dir: root directory of the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import get_whitenoise, normalize_wav, save_wav, TempDirMixin, TorchaudioTestCase
def get_mock_dataset(root_dir):
"""
    root_dir: root directory of the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, "genres", genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f"{genre}.{i:05d}"
path = os.path.join(base_dir, f"{filename}.wav")
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype="int16", seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset="testing")
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset="training")
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset="validation")
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset="testing")
self._test_testing(test_dataset)
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to load nccl dynamically
use_dlopen_nccl: bool = False
# Whether to enable HDFS
use_hdfs: bool = False
# Whether to enable Azure Storage
use_azure: bool = False
# Whether to enable AWS S3
use_s3: bool = False
# Whether to enable the dense parser plugin
plugin_dense_parser: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
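# --- Hedged usage sketch (not part of the original module) ---
# Shows how config_settings passed by a PEP 517 frontend flow through
# update() and come out as CMake flags. The settings dictionary below is
# hypothetical; only the dataclass fields above are meaningful keys.
def _example_cmake_args() -> List[str]:
    config = BuildConfiguration()
    config.update({"use_cuda": "True", "use_openmp": "0"})
    # Produces flags such as "-DUSE_CUDA=ON" and "-DUSE_OPENMP=OFF".
    return config.get_cmake_args()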
|
"""Build configuration"""
import dataclasses
from typing import Any, Dict, List, Optional
@dataclasses.dataclass
class BuildConfiguration: # pylint: disable=R0902
"""Configurations use when building libxgboost"""
# Whether to hide C++ symbols in libxgboost.so
hide_cxx_symbols: bool = True
# Whether to enable OpenMP
use_openmp: bool = True
# Whether to enable CUDA
use_cuda: bool = False
# Whether to enable NCCL
use_nccl: bool = False
# Whether to enable HDFS
use_hdfs: bool = False
# Whether to enable Azure Storage
use_azure: bool = False
# Whether to enable AWS S3
use_s3: bool = False
# Whether to enable the dense parser plugin
plugin_dense_parser: bool = False
# Special option: See explanation below
use_system_libxgboost: bool = False
def _set_config_setting(self, config_settings: Dict[str, Any]) -> None:
for field_name in config_settings:
setattr(
self,
field_name,
(config_settings[field_name].lower() in ["true", "1", "on"]),
)
def update(self, config_settings: Optional[Dict[str, Any]]) -> None:
"""Parse config_settings from Pip (or other PEP 517 frontend)"""
if config_settings is not None:
self._set_config_setting(config_settings)
def get_cmake_args(self) -> List[str]:
"""Convert build configuration to CMake args"""
cmake_args = []
for field_name in [x.name for x in dataclasses.fields(self)]:
if field_name in ["use_system_libxgboost"]:
continue
cmake_option = field_name.upper()
cmake_value = "ON" if getattr(self, field_name) is True else "OFF"
cmake_args.append(f"-D{cmake_option}={cmake_value}")
return cmake_args
|
from typing import Any, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
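# --- Hedged usage sketch (not part of the original module) ---
# Applies the two transforms above to a single made-up box. The coordinates
# and canvas size are invented purely for illustration.
def _example_convert_and_clamp() -> tv_tensors.BoundingBoxes:
    import torch

    boxes = tv_tensors.BoundingBoxes(
        torch.tensor([[50.0, 50.0, 30.0, 20.0]]),  # one box in CXCYWH layout
        format=tv_tensors.BoundingBoxFormat.CXCYWH,
        canvas_size=(60, 60),
    )
    boxes = ConvertBoundingBoxFormat("XYXY")(boxes)
    # Clamping uses the canvas_size metadata carried by the tv_tensor.
    return ClampBoundingBoxes()(boxes)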
|
from typing import Any, Dict, Union
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
Args:
format (str or tv_tensors.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.tv_tensors.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def __init__(self, format: Union[str, tv_tensors.BoundingBoxFormat]) -> None:
super().__init__()
self.format = format
def transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.convert_bounding_box_format(inpt, new_format=self.format) # type: ignore[return-value, arg-type]
class ClampBoundingBoxes(Transform):
"""Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``canvas_size`` meta-data.
"""
_transformed_types = (tv_tensors.BoundingBoxes,)
def transform(self, inpt: tv_tensors.BoundingBoxes, params: Dict[str, Any]) -> tv_tensors.BoundingBoxes:
return F.clamp_bounding_boxes(inpt) # type: ignore[return-value]
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_graph_metadata, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode_document(
corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode_query(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example of how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.models import Router
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
router = Router.for_query_document(
query_modules=[
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
document_modules=[
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
)
sparse_model = SparseEncoder(modules=[router], similarity_fn_name="dot")
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Type, Union
from docarray.document import BaseDocument
if TYPE_CHECKING:
from docarray.typing import NdArray, TorchTensor
class AbstractDocumentArray(Sequence):
document_type: Type[BaseDocument]
_columns: Optional[
Dict[str, Union['TorchTensor', 'AbstractDocumentArray', 'NdArray', None]]
] # here columns are the holder of the data in tensor modes
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
@abstractmethod
def is_stacked(self) -> bool:
...
@abstractmethod
def _column_fields(self) -> List[str]:
...
|
from abc import abstractmethod
from typing import Iterable, Type
from docarray.document import BaseDocument
class AbstractDocumentArray(Iterable):
document_type: Type[BaseDocument]
@abstractmethod
def __init__(self, docs: Iterable[BaseDocument]):
...
@abstractmethod
def __class_getitem__(
cls, item: Type[BaseDocument]
) -> Type['AbstractDocumentArray']:
...
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
from .misc import find_latest_checkpoint
__all__ = [
'get_root_logger',
'collect_env',
'find_latest_checkpoint',
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_root_logger
__all__ = ['get_root_logger', 'collect_env']
|
import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
# On Windows Python-3.8.x has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured. Please take a look:
# https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
# Please note: if some path can't be added using add_dll_directory we simply ignore this path
if os.name == "nt" and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
try:
os.add_dll_directory(path) # type: ignore[attr-defined]
except Exception:
pass
lib_path = _get_extension_path("_C")
torch.ops.load_library(lib_path)
_HAS_OPS = True
def _has_ops(): # noqa: F811
return True
except (ImportError, OSError):
pass
def _assert_has_ops():
if not _has_ops():
raise RuntimeError(
"Couldn't load custom C++ ops. This can happen if your PyTorch and "
"torchvision versions are incompatible, or if you had errors while compiling "
"torchvision from source. For further information on the compatible versions, check "
"https://github.com/pytorch/vision#installation for the compatibility matrix. "
"Please check your PyTorch version with torch.__version__ and your torchvision "
"version with torchvision.__version__ and verify if they are compatible, and if not "
"please reinstall torchvision so that it matches your PyTorch install."
)
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
from torch.version import cuda as torch_version_cuda
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch_version_cuda is not None:
tv_version = str(_version)
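        # Note: the digit slicing below assumes the CUDA version is packed as an
        # integer of the form major*1000 + minor*10, e.g. 9020 -> 9.2 and
        # 11030 -> 11.3 (an illustrative interpretation, not from upstream docs).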
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch_version_cuda.split(".")
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major:
raise RuntimeError(
"Detected that PyTorch and torchvision were compiled with different CUDA major versions. "
f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
f"CUDA Version={tv_major}.{tv_minor}. "
"Please reinstall the torchvision that matches your PyTorch install."
)
return _version
def _load_library(lib_name):
lib_path = _get_extension_path(lib_name)
torch.ops.load_library(lib_path)
_check_cuda_version()
|
import os
import sys
import torch
from ._internally_replaced_utils import _get_extension_path
_HAS_OPS = False
def _has_ops():
return False
try:
# On Windows Python-3.8.x has `os.add_dll_directory` call,
# which is called to configure dll search path.
# To find cuda related dlls we need to make sure the
    # conda environment/bin path is configured. Please take a look:
# https://stackoverflow.com/questions/59330863/cant-import-dll-module-in-python
# Please note: if some path can't be added using add_dll_directory we simply ignore this path
if os.name == "nt" and sys.version_info < (3, 9):
env_path = os.environ["PATH"]
path_arr = env_path.split(";")
for path in path_arr:
if os.path.exists(path):
try:
os.add_dll_directory(path) # type: ignore[attr-defined]
except Exception:
pass
lib_path = _get_extension_path("_C")
torch.ops.load_library(lib_path)
_HAS_OPS = True
def _has_ops(): # noqa: F811
return True
except (ImportError, OSError):
pass
def _assert_has_ops():
if not _has_ops():
raise RuntimeError(
"Couldn't load custom C++ ops. This can happen if your PyTorch and "
"torchvision versions are incompatible, or if you had errors while compiling "
"torchvision from source. For further information on the compatible versions, check "
"https://github.com/pytorch/vision#installation for the compatibility matrix. "
"Please check your PyTorch version with torch.__version__ and your torchvision "
"version with torchvision.__version__ and verify if they are compatible, and if not "
"please reinstall torchvision so that it matches your PyTorch install."
)
def _check_cuda_version():
"""
Make sure that CUDA versions match between the pytorch install and torchvision install
"""
if not _HAS_OPS:
return -1
from torch.version import cuda as torch_version_cuda
_version = torch.ops.torchvision._cuda_version()
if _version != -1 and torch_version_cuda is not None:
tv_version = str(_version)
if int(tv_version) < 10000:
tv_major = int(tv_version[0])
tv_minor = int(tv_version[2])
else:
tv_major = int(tv_version[0:2])
tv_minor = int(tv_version[3])
t_version = torch_version_cuda.split(".")
t_major = int(t_version[0])
t_minor = int(t_version[1])
if t_major != tv_major or t_minor != tv_minor:
raise RuntimeError(
"Detected that PyTorch and torchvision were compiled with different CUDA versions. "
f"PyTorch has CUDA Version={t_major}.{t_minor} and torchvision has "
f"CUDA Version={tv_major}.{tv_minor}. "
"Please reinstall the torchvision that matches your PyTorch install."
)
return _version
def _load_library(lib_name):
lib_path = _get_extension_path(lib_name)
torch.ops.load_library(lib_path)
_check_cuda_version()
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_field_type(field))
|
import numpy as np
import torch
from docarray import BaseDocument
from docarray.document import AnyDocument
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchTensor,
)
def test_proto_all_types():
class Mymmdoc(BaseDocument):
tensor: NdArray
torch_tensor: TorchTensor
embedding: Embedding
any_url: AnyUrl
image_url: ImageUrl
text_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
doc = Mymmdoc(
tensor=np.zeros((3, 224, 224)),
torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((100, 1)),
any_url='http://jina.ai',
image_url='http://jina.ai/bla.jpg',
text_url='http://jina.ai',
mesh_url='http://jina.ai/mesh.obj',
point_cloud_url='http://jina.ai/mesh.obj',
)
new_doc = AnyDocument.from_protobuf(doc.to_protobuf())
for field, value in new_doc:
if field == 'embedding':
# embedding is a Union type, not supported by isinstance
assert isinstance(value, np.ndarray) or isinstance(value, torch.Tensor)
else:
assert isinstance(value, doc._get_nested_document_class(field))
|
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs, row_title=None, **imshow_kwargs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
boxes = None
masks = None
if isinstance(img, tuple):
img, target = img
if isinstance(target, dict):
boxes = target.get("boxes")
masks = target.get("masks")
elif isinstance(target, datapoints.BoundingBoxes):
boxes = target
else:
raise ValueError(f"Unexpected target type: {type(target)}")
img = F.to_image(img)
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
img = F.to_dtype(img, torch.uint8, scale=True)
if boxes is not None:
img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
if masks is not None:
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy(), **imshow_kwargs)
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
if row_title is not None:
for row_idx in range(num_rows):
axs[row_idx, 0].set(ylabel=row_title[row_idx])
plt.tight_layout()
|
import matplotlib.pyplot as plt
import torch
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F
def plot(imgs):
if not isinstance(imgs[0], list):
# Make a 2d grid even if there's just 1 row
imgs = [imgs]
num_rows = len(imgs)
num_cols = len(imgs[0])
_, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False)
for row_idx, row in enumerate(imgs):
for col_idx, img in enumerate(row):
boxes = None
masks = None
if isinstance(img, tuple):
img, target = img
if isinstance(target, dict):
boxes = target.get("boxes")
masks = target.get("masks")
elif isinstance(target, datapoints.BoundingBoxes):
boxes = target
else:
raise ValueError(f"Unexpected target type: {type(target)}")
img = F.to_image(img)
if img.dtype.is_floating_point and img.min() < 0:
# Poor man's re-normalization for the colors to be OK-ish. This
# is useful for images coming out of Normalize()
img -= img.min()
img /= img.max()
img = F.to_dtype(img, torch.uint8, scale=True)
if boxes is not None:
img = draw_bounding_boxes(img, boxes, colors="yellow", width=3)
if masks is not None:
img = draw_segmentation_masks(img, masks.to(torch.bool), colors=["green"] * masks.shape[0], alpha=.65)
ax = axs[row_idx, col_idx]
ax.imshow(img.permute(1, 2, 0).numpy())
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
plt.tight_layout()
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
Model Sparsity Stats: num_rows: 1500, num_cols: 30522, row_non_zero_mean: 80.34333038330078, row_sparsity_mean: 0.9973676204681396
Model Sparsity Stats: num_rows: 1500, num_cols: 30522, row_non_zero_mean: 81.78266906738281, row_sparsity_mean: 0.9973204731941223
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .progressbar import (ProgressBar, track_iter_progress,
track_parallel_progress, track_progress)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .timer import Timer, check_time
from .torch_ops import torch_meshgrid
from .trace import is_jit_tracing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm', 'collect_env',
'Timer', 'check_time', 'ProgressBar', 'track_iter_progress',
'track_parallel_progress', 'track_progress', 'torch_meshgrid',
'is_jit_tracing'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
has_batch_norm, has_method, import_modules_from_strings,
is_list_of, is_method_overridden, is_seq_of, is_str,
is_tuple_of, iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .package_utils import (call_command, check_install_package,
get_installed_path, is_installed)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_abs, is_filepath,
mkdir_or_exist, scandir, symlink)
from .setup_env import set_multi_processing
from .sync_bn import revert_sync_batchnorm
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'ManagerMeta', 'ManagerMixin', 'set_multi_processing', 'has_batch_norm',
'is_abs', 'is_installed', 'call_command', 'get_installed_path',
    'check_install_package', 'revert_sync_batchnorm', 'collect_env'
]
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
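# --- Hedged usage sketch (not part of the original module) ---
# Shows how the evaluator above might be awaited. The query, response and
# contexts are invented, and running this for real requires OpenAI credentials
# for the underlying OpenAIService.
async def _example_consistency_eval() -> EvaluationResult:
    evaluator = AnswerConsistencyEvaluator()
    return await evaluator.aevaluate(
        query="What is the capital of France?",
        response="Paris is the capital of France.",
        contexts=["Paris has been the capital of France since the 10th century."],
    )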
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_metric import (
AnswerConsistencyMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
# The shape has been 29528 for <0.5.0, 29525 for 0.5.0, and 29524 for >=0.6.0, so let's make a safer test
# that checks the first dimension is close to 29525 and the second dimension is 32.
assert abs(model.embedding.weight.shape[0] - 29525) < 5
assert model.embedding.weight.shape[1] == 32
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DSlicerBlock(Slant3DBlockBase):
"""Block for slicing 3D model files"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
file_url: str = SchemaField(
description="URL of the 3D model file to slice (STL)"
)
class Output(BlockSchema):
message: str = SchemaField(description="Response message")
price: float = SchemaField(description="Calculated price for printing")
error: str = SchemaField(description="Error message if slicing failed")
def __init__(self):
super().__init__(
id="f8a12c8d-3e4b-4d5f-b6a7-8c9d0e1f2g3h",
description="Slice a 3D model file and get pricing information",
input_schema=self.Input,
output_schema=self.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"file_url": "https://example.com/model.stl",
},
test_credentials=TEST_CREDENTIALS,
test_output=[("message", "Slicing successful"), ("price", 8.23)],
test_mock={
"_make_request": lambda *args, **kwargs: {
"message": "Slicing successful",
"data": {"price": 8.23},
}
},
)
async def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = await self._make_request(
"POST",
"slicer",
credentials.api_key.get_secret_value(),
json={"fileURL": input_data.file_url},
)
yield "message", result["message"]
yield "price", result["data"]["price"]
except Exception as e:
yield "error", str(e)
raise
|
from backend.data.block import BlockOutput, BlockSchema
from backend.data.model import APIKeyCredentials, SchemaField
from ._api import (
TEST_CREDENTIALS,
TEST_CREDENTIALS_INPUT,
Slant3DCredentialsField,
Slant3DCredentialsInput,
)
from .base import Slant3DBlockBase
class Slant3DSlicerBlock(Slant3DBlockBase):
"""Block for slicing 3D model files"""
class Input(BlockSchema):
credentials: Slant3DCredentialsInput = Slant3DCredentialsField()
file_url: str = SchemaField(
description="URL of the 3D model file to slice (STL)"
)
class Output(BlockSchema):
message: str = SchemaField(description="Response message")
price: float = SchemaField(description="Calculated price for printing")
error: str = SchemaField(description="Error message if slicing failed")
def __init__(self):
super().__init__(
id="f8a12c8d-3e4b-4d5f-b6a7-8c9d0e1f2g3h",
description="Slice a 3D model file and get pricing information",
input_schema=self.Input,
output_schema=self.Output,
test_input={
"credentials": TEST_CREDENTIALS_INPUT,
"file_url": "https://example.com/model.stl",
},
test_credentials=TEST_CREDENTIALS,
test_output=[("message", "Slicing successful"), ("price", 8.23)],
test_mock={
"_make_request": lambda *args, **kwargs: {
"message": "Slicing successful",
"data": {"price": 8.23},
}
},
)
def run(
self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
) -> BlockOutput:
try:
result = self._make_request(
"POST",
"slicer",
credentials.api_key.get_secret_value(),
json={"fileURL": input_data.file_url},
)
yield "message", result["message"]
yield "price", result["data"]["price"]
except Exception as e:
yield "error", str(e)
raise
|
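Both variants of Slant3DSlicerBlock above expose run as a generator of (name, value) pairs that yields an error entry and then re-raises on failure. A minimal self-contained sketch of that output protocol, using a stand-in for _make_request instead of the real Slant3D API, could look like this:

import asyncio

async def fake_make_request(*args, **kwargs):
    # Stand-in for Slant3DBlockBase._make_request; the real method calls the Slant3D API.
    return {"message": "Slicing successful", "data": {"price": 8.23}}

async def run_block(file_url: str):
    # Mirrors the async-generator shape of Slant3DSlicerBlock.run
    try:
        result = await fake_make_request("POST", "slicer", json={"fileURL": file_url})
        yield "message", result["message"]
        yield "price", result["data"]["price"]
    except Exception as e:
        yield "error", str(e)
        raise

async def main():
    async for name, value in run_block("https://example.com/model.stl"):
        print(name, value)

asyncio.run(main())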
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.librispeech.lightning import LibriSpeechRNNTModule
class MockLIBRISPEECH:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch(
"sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=4096)
), patch("asr.emformer_rnnt.librispeech.lightning.GlobalStatsNormalization", new=torch.nn.Identity), patch(
"torchaudio.datasets.LIBRISPEECH", new=MockLIBRISPEECH
), patch(
"asr.emformer_rnnt.librispeech.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield LibriSpeechRNNTModule(
librispeech_path="librispeech_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.librispeech.lightning import LibriSpeechRNNTModule
class MockLIBRISPEECH:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
16000,
"sup",
2,
3,
4,
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch(
"sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=4096)
), patch("asr.emformer_rnnt.librispeech.lightning.GlobalStatsNormalization", new=torch.nn.Identity), patch(
"torchaudio.datasets.LIBRISPEECH", new=MockLIBRISPEECH
), patch(
"asr.emformer_rnnt.librispeech.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield LibriSpeechRNNTModule(
librispeech_path="librispeech_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestLibriSpeechRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
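The get_lightning_module helper above is just a stack of unittest.mock.patch context managers wrapped in @contextmanager. The same pattern, reduced to the standard library only, looks like this:

from contextlib import contextmanager
from unittest.mock import patch
import random

def fixed_random():
    # Deterministic stand-in, playing the role of MockLIBRISPEECH / MockDataloader above.
    return 0.5

@contextmanager
def deterministic_random():
    # Everything inside the with-block sees the patched function; it is restored on exit.
    with patch("random.random", new=fixed_random):
        yield random

with deterministic_random() as rng:
    assert rng.random() == 0.5
print(random.random())  # outside the block the original random.random is restored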
_base_ = './sparse-rcnn_r50_fpn_300-proposals_crop-ms-480-800-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
import base64
from collections import defaultdict
from typing import TYPE_CHECKING, Type
import numpy as np
if TYPE_CHECKING:
from pydantic import BaseModel
from docarray.typing import T
from docarray.document.pydantic_model import PydanticDocument
class PydanticMixin:
"""Provide helper functions to convert to/from a Pydantic model"""
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of Document class."""
from docarray.document.pydantic_model import PydanticDocument as DP
from pydantic import schema_json_of
return schema_json_of(DP, title='Document Schema', indent=indent)
def to_pydantic_model(self) -> 'PydanticDocument':
"""Convert a Document object into a Pydantic model."""
from docarray.document.pydantic_model import PydanticDocument as DP
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_pydantic_model()
elif f in ('scores', 'evaluations'):
_p_dict[f] = {k: v.to_dict() for k, v in v.items()}
elif f == 'blob':
_p_dict[f] = base64.b64encode(v).decode('utf8')
else:
_p_dict[f] = v
return DP(**_p_dict)
@classmethod
def from_pydantic_model(cls: Type['T'], model: 'BaseModel') -> 'T':
"""Build a Document object from a Pydantic model
:param model: the pydantic data model object that represents a Document
:return: a Document object
"""
from docarray import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_pydantic_model(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_pydantic_model(d) for d in model.matches]
for (field, value) in model.dict(
exclude_none=True, exclude={'chunks', 'matches'}
).items():
f_name = field
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(v)
elif f_name == 'embedding' or f_name == 'tensor':
fields[f_name] = np.array(value)
elif f_name == 'blob':
fields[f_name] = base64.b64decode(value)
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
|
import base64
from collections import defaultdict
from typing import TYPE_CHECKING, Type
import numpy as np
if TYPE_CHECKING:
from pydantic import BaseModel
from ...typing import T
from ..pydantic_model import PydanticDocument
class PydanticMixin:
"""Provide helper functions to convert to/from a Pydantic model"""
@classmethod
def get_json_schema(cls, indent: int = 2) -> str:
"""Return a JSON Schema of Document class."""
from ..pydantic_model import PydanticDocument as DP
from pydantic import schema_json_of
return schema_json_of(DP, title='Document Schema', indent=indent)
def to_pydantic_model(self) -> 'PydanticDocument':
"""Convert a Document object into a Pydantic model."""
from ..pydantic_model import PydanticDocument as DP
_p_dict = {}
for f in self.non_empty_fields:
v = getattr(self, f)
if f in ('matches', 'chunks'):
_p_dict[f] = v.to_pydantic_model()
elif f in ('scores', 'evaluations'):
_p_dict[f] = {k: v.to_dict() for k, v in v.items()}
elif f == 'blob':
_p_dict[f] = base64.b64encode(v).decode('utf8')
else:
_p_dict[f] = v
return DP(**_p_dict)
@classmethod
def from_pydantic_model(cls: Type['T'], model: 'BaseModel') -> 'T':
"""Build a Document object from a Pydantic model
:param model: the pydantic data model object that represents a Document
:return: a Document object
"""
from ... import Document
fields = {}
_field_chunks, _field_matches = None, None
if model.chunks:
_field_chunks = [Document.from_pydantic_model(d) for d in model.chunks]
if model.matches:
_field_matches = [Document.from_pydantic_model(d) for d in model.matches]
for (field, value) in model.dict(
exclude_none=True, exclude={'chunks', 'matches'}
).items():
f_name = field
if f_name == 'scores' or f_name == 'evaluations':
from docarray.score import NamedScore
fields[f_name] = defaultdict(NamedScore)
for k, v in value.items():
fields[f_name][k] = NamedScore(v)
elif f_name == 'embedding' or f_name == 'tensor':
fields[f_name] = np.array(value)
elif f_name == 'blob':
fields[f_name] = base64.b64decode(value)
else:
fields[f_name] = value
d = Document(**fields)
if _field_chunks:
d.chunks = _field_chunks
if _field_matches:
d.matches = _field_matches
return d
|
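For context, get_json_schema above leans on pydantic v1's schema_json_of helper. A standalone sketch (assuming pydantic 1.x; MiniDoc is a toy stand-in for PydanticDocument):

from typing import List, Optional
from pydantic import BaseModel, schema_json_of

class MiniDoc(BaseModel):
    # Toy self-referential document model, only to demonstrate the schema export.
    id: Optional[str]
    text: Optional[str]
    chunks: List["MiniDoc"] = []

MiniDoc.update_forward_refs()
print(schema_json_of(MiniDoc, title="Document Schema", indent=2))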
import os
import pytest
from llama_index.llms.asi import ASI
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_completion():
# Test basic completion
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp = asi.complete("hello")
assert resp.text.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test basic chat
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
messages = [ChatMessage(role=MessageRole.USER, content="hello")]
resp = asi.chat(messages)
assert resp.message.content.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
@pytest.mark.skip(reason="ASI doesn't support streaming for completions")
def test_stream_completion():
# ASI doesn't support streaming for completions
# This test is skipped because ASI returns empty chunks for streaming
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp_gen = asi.stream_complete("hello")
# Get the response
response = ""
for chunk in resp_gen:
if hasattr(chunk, "text"):
response += chunk.text
# Verify we got a non-empty response
assert response.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_stream_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test streaming chat with a longer prompt and timeout
asi = ASI(model="asi1-mini", temperature=0, max_tokens=50, timeout=60)
messages = [
ChatMessage(
role=MessageRole.USER, content="Tell me about artificial intelligence"
)
]
# First verify that regular chat works
chat_resp = asi.chat(messages)
assert chat_resp.message.content.strip() != ""
# Now test streaming chat
try:
# Collect all chunks
chunks = []
for chunk in asi.stream_chat(messages):
chunks.append(chunk)
# Verify we got at least one chunk
assert len(chunks) > 0
# Verify at least one chunk has content
has_content = False
for chunk in chunks:
if hasattr(chunk, "delta") and chunk.delta.strip():
has_content = True
break
assert has_content, "No chunk with content found in the response"
except Exception as e:
# If streaming fails but regular chat works, we'll skip this test
# This handles environment-specific issues while ensuring the
# implementation is correct
pytest.skip(f"Streaming test skipped due to environment-specific issue: {e}")
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
@pytest.mark.asyncio
async def test_astream_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test async streaming chat with a longer prompt and timeout
asi = ASI(model="asi1-mini", temperature=0, max_tokens=50, timeout=60)
messages = [
ChatMessage(
role=MessageRole.USER, content="Tell me about artificial intelligence"
)
]
# First verify that regular async chat works
chat_resp = await asi.achat(messages)
assert chat_resp.message.content.strip() != ""
# Now test async streaming chat
try:
# Collect all chunks
chunks = []
async for chunk in asi.astream_chat(messages):
chunks.append(chunk)
# Verify we got at least one chunk
assert len(chunks) > 0
# Verify at least one chunk has content
has_content = False
for chunk in chunks:
if hasattr(chunk, "delta") and chunk.delta.strip():
has_content = True
break
assert has_content, "No chunk with content found in the response"
except Exception as e:
# If streaming fails but regular chat works, we'll skip this test
# This handles environment-specific issues while ensuring the
# implementation is correct
pytest.skip(
f"Async streaming test skipped due to environment-specific issue: {e}"
)
|
import os
import pytest
from llama_index.llms.asi import ASI
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_completion():
# Test basic completion
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp = asi.complete("hello")
assert resp.text.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test basic chat
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
messages = [ChatMessage(role=MessageRole.USER, content="hello")]
resp = asi.chat(messages)
assert resp.message.content.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
@pytest.mark.skip(reason="ASI doesn't support streaming for completions")
def test_stream_completion():
# ASI doesn't support streaming for completions
# This test is skipped because ASI returns empty chunks for streaming
asi = ASI(model="asi1-mini", temperature=0, max_tokens=10)
resp_gen = asi.stream_complete("hello")
# Get the response
response = ""
for chunk in resp_gen:
if hasattr(chunk, "text"):
response += chunk.text
# Verify we got a non-empty response
assert response.strip() != ""
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
def test_stream_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test streaming chat with a longer prompt and timeout
asi = ASI(model="asi1-mini", temperature=0, max_tokens=50, timeout=60)
messages = [
ChatMessage(
role=MessageRole.USER, content="Tell me about artificial intelligence"
)
]
# First verify that regular chat works
chat_resp = asi.chat(messages)
assert chat_resp.message.content.strip() != ""
# Now test streaming chat
try:
# Collect all chunks
chunks = []
for chunk in asi.stream_chat(messages):
chunks.append(chunk)
# Verify we got at least one chunk
assert len(chunks) > 0
# Verify at least one chunk has content
has_content = False
for chunk in chunks:
if hasattr(chunk, "delta") and chunk.delta.strip():
has_content = True
break
assert has_content, "No chunk with content found in the response"
except Exception as e:
# If streaming fails but regular chat works, we'll skip this test
# This handles environment-specific issues while ensuring the
# implementation is correct
pytest.skip("Streaming test skipped due to environment-specific issue: " f"{e}")
@pytest.mark.skipif("ASI_API_KEY" not in os.environ, reason="No ASI API key")
@pytest.mark.asyncio
async def test_astream_chat():
# Import ChatMessage and MessageRole here to avoid import issues
from llama_index.core.llms import ChatMessage, MessageRole
# Test async streaming chat with a longer prompt and timeout
asi = ASI(model="asi1-mini", temperature=0, max_tokens=50, timeout=60)
messages = [
ChatMessage(
role=MessageRole.USER, content="Tell me about artificial intelligence"
)
]
# First verify that regular async chat works
chat_resp = await asi.achat(messages)
assert chat_resp.message.content.strip() != ""
# Now test async streaming chat
try:
# Collect all chunks
chunks = []
async for chunk in asi.astream_chat(messages):
chunks.append(chunk)
# Verify we got at least one chunk
assert len(chunks) > 0
# Verify at least one chunk has content
has_content = False
for chunk in chunks:
if hasattr(chunk, "delta") and chunk.delta.strip():
has_content = True
break
assert has_content, "No chunk with content found in the response"
except Exception as e:
# If streaming fails but regular chat works, we'll skip this test
# This handles environment-specific issues while ensuring the
# implementation is correct
pytest.skip(
"Async streaming test skipped due to environment-specific issue: " f"{e}"
)
|
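The environment-variable gating used throughout the ASI tests is plain pytest.mark.skipif; stripped of the ASI client it reduces to the following (EXAMPLE_API_KEY is a hypothetical variable name):

import os
import pytest

requires_api_key = pytest.mark.skipif(
    "EXAMPLE_API_KEY" not in os.environ,
    reason="No API key in the environment",
)

@requires_api_key
def test_something_that_needs_the_key():
    # Only runs when the key is exported, exactly like the ASI_API_KEY guard above.
    assert os.environ["EXAMPLE_API_KEY"] != ""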
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""
Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
    ) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
    ) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
                # look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
raw_tool_output = await tools_by_name[func_name].acall(
*input_values
)
tool_outputs.append(
ToolOutput(
content=str(raw_tool_output),
tool_name=func_name,
raw_output=raw_tool_output,
raw_input={"args": input_values},
is_error=False,
)
)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = str(raw_tool_output)
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(f"{placeholder}", '"' + str(value) + '"')
return solution, tool_outputs
|
"""Chain-of-Abstraction Output Parser."""
import asyncio
import json
import networkx as nx
import re
from collections import defaultdict
from typing import Dict, List, Tuple
from llama_index.core.tools import AsyncBaseTool, ToolOutput
from llama_index.core.types import BaseOutputParser
class ChainOfAbstractionParser(BaseOutputParser):
"""Chain of abstraction output parser.
This parser is used to parse the output using the default prompt
defined in prompts.py.
If the prompt formatting changes the function format, this parser
will not work and should be updated.
"""
def __init__(self, verbose: bool = False):
"""Init params."""
self._verbose = verbose
def parse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
    ) -> Tuple[str, List[ToolOutput]]:
return asyncio.run(self.aparse(solution, tools_by_name))
async def aparse(
self, solution: str, tools_by_name: Dict[str, AsyncBaseTool]
    ) -> Tuple[str, List[ToolOutput]]:
# Extract function calls and placeholders
func_calls = re.findall(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution)
placeholders = set()
for match in re.finditer(r"\[FUNC (\w+)\((.*?)\) = (\w+)\]", solution):
placeholders.add(match.group(3))
# Create a dependency graph
graph = nx.DiGraph()
for func_name, inputs, output in func_calls:
inputs = json.loads("[" + inputs + "]")
graph.add_node(output, func_name=func_name, inputs=inputs)
for inp in inputs:
graph.add_edge(inp, output)
# Find the execution levels
execution_levels = defaultdict(list)
for node in nx.topological_sort(graph):
level = (
max(
[execution_levels[pred] for pred in graph.predecessors(node)],
default=-1,
)
+ 1
)
execution_levels[node] = level
# Group nodes by execution level
level_groups = defaultdict(list)
for node, level in execution_levels.items():
level_groups[level].append(node)
# Execute functions and replace placeholders
results = {}
tool_outputs = []
graph_nodes = {node[0]: node[1] for node in graph.nodes(data=True)}
for level in sorted(level_groups.keys()):
level_nodes = level_groups[level]
parallel_results = {}
for placeholder in level_nodes:
if len(graph_nodes[placeholder]) == 0:
continue
# get function name and inputs
func_name, inputs = (
graph_nodes[placeholder]["func_name"],
graph_nodes[placeholder]["inputs"],
)
                # look up any inputs that depend on other functions
input_values = [results.get(inp, inp) for inp in inputs]
if self._verbose:
print(
f"==== Executing {func_name} with inputs {input_values} ====",
flush=True,
)
# execute function and store result
try:
raw_tool_output = await tools_by_name[func_name].acall(
*input_values
)
tool_outputs.append(
ToolOutput(
content=str(raw_tool_output),
tool_name=func_name,
raw_output=raw_tool_output,
raw_input={"args": input_values},
is_error=False,
)
)
except Exception as e:
tool_outputs.append(
ToolOutput(
content=str(e),
tool_name=func_name,
raw_output=None,
raw_input={"args": input_values},
is_error=True,
)
)
# If an error occurs, stop execution
break
parallel_results[placeholder] = str(raw_tool_output)
results.update(parallel_results)
# Replace placeholders in the solution text
for placeholder, value in results.items():
solution = solution.replace(f"{placeholder}", '"' + str(value) + '"')
return solution, tool_outputs
|
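The scheduling step inside aparse is independent of the LLM tooling: each placeholder becomes a node, each input an edge, and a topological sort assigns every node the level just after its deepest predecessor. A standalone sketch of only that part (requires networkx):

from collections import defaultdict
import networkx as nx

# Toy call graph: y1 = f(2); y2 = g(y1); y3 = h(y1, y2)
graph = nx.DiGraph()
graph.add_node("y1", func_name="f", inputs=[2])
graph.add_node("y2", func_name="g", inputs=["y1"])
graph.add_node("y3", func_name="h", inputs=["y1", "y2"])
graph.add_edge("y1", "y2")
graph.add_edge("y1", "y3")
graph.add_edge("y2", "y3")

execution_levels = {}
for node in nx.topological_sort(graph):
    preds = [execution_levels[p] for p in graph.predecessors(node)]
    execution_levels[node] = max(preds, default=-1) + 1

level_groups = defaultdict(list)
for node, level in execution_levels.items():
    level_groups[level].append(node)
print(dict(level_groups))  # {0: ['y1'], 1: ['y2'], 2: ['y3']}: nodes on one level can run in parallel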
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDoc):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='PointsAndColors')
class PointsAndColors(BaseDocument):
"""
Document for handling point clouds tensor data.
A PointsAndColors Document can contain an AnyTensor containing the points in
3D space information (`PointsAndColors.points`), and an AnyTensor containing
the points' color information (`PointsAndColors.colors`).
"""
points: AnyTensor
colors: Optional[AnyTensor]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(points=value)
return super().validate(value)
def display(self) -> None:
"""
Plot point cloud consisting of points in 3D space and optionally colors.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
colors = (
self.colors
if self.colors is not None
else np.tile(
np.array([0, 0, 0]),
(self.points.get_comp_backend().shape(self.points)[0], 1),
)
)
pc = trimesh.points.PointCloud(vertices=self.points, colors=colors)
s = trimesh.Scene(geometry=pc)
display(s.show())
|
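The display method above boils down to a default black color array plus two trimesh calls. A headless sketch of the same construction (requires numpy and trimesh, i.e. pip install 'trimesh[easy]'; the interactive show() call is deliberately left out):

import numpy as np
import trimesh

points = np.random.rand(100, 3)  # 100 random points in 3D space
colors = np.tile(np.array([0, 0, 0]), (points.shape[0], 1))  # default to black, as above

pc = trimesh.points.PointCloud(vertices=points, colors=colors)
scene = trimesh.Scene(geometry=pc)
print(len(pc.vertices), len(scene.geometry))  # scene.show() would open the viewer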
"""Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""
SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly chunk PDFs for optimal usage of LLM context window.
Args:
llmsherpa_api_url (str): Address of the service hosting llmsherpa PDF parser
"""
def __init__(
self, *args: Any, llmsherpa_api_url: str = None, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
from llmsherpa.readers import LayoutPDFReader
self.pdf_reader = LayoutPDFReader(llmsherpa_api_url)
def load_data(
self, pdf_path_or_url: str, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
pdf_path_or_url (str): A url or file path pointing to the PDF
Returns:
List[Document]: List of documents.
"""
results = []
doc = self.pdf_reader.read_pdf(str(pdf_path_or_url))
for chunk in doc.chunks():
document = Document(
text=chunk.to_context_text(),
extra_info={**extra_info, "chunk_type": chunk.tag}
if extra_info
else {"chunk_type": chunk.tag},
)
results.append(document)
return results
|
"""Smart PDF Loader."""
from typing import Any, Dict, List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class SmartPDFLoader(BaseReader):
"""
SmartPDFLoader uses nested layout information such as sections, paragraphs, lists and tables to smartly chunk PDFs for optimal usage of LLM context window.
Args:
llmsherpa_api_url (str): Address of the service hosting llmsherpa PDF parser
"""
def __init__(
self, *args: Any, llmsherpa_api_url: str = None, **kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
from llmsherpa.readers import LayoutPDFReader
self.pdf_reader = LayoutPDFReader(llmsherpa_api_url)
def load_data(
self, pdf_path_or_url: str, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
pdf_path_or_url (str): A url or file path pointing to the PDF
Returns:
List[Document]: List of documents.
"""
results = []
doc = self.pdf_reader.read_pdf(str(pdf_path_or_url))
for chunk in doc.chunks():
document = Document(
text=chunk.to_context_text(),
extra_info={**extra_info, "chunk_type": chunk.tag}
if extra_info
else {"chunk_type": chunk.tag},
)
results.append(document)
return results
|
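Using the loader defined above is a two-liner. The sketch below assumes a reachable llmsherpa parsing service; both the service URL and the PDF URL are placeholders, and the import path is only the conventional one for this reader package, which may differ by version.

from llama_index.readers.smart_pdf_loader import SmartPDFLoader  # path assumed, may vary

loader = SmartPDFLoader(llmsherpa_api_url="https://example.com/api/parseDocument")
documents = loader.load_data(
    "https://example.com/sample.pdf", extra_info={"source": "demo"}
)
for doc in documents:
    # Each chunk carries its layout tag (paragraph, table, ...) in extra_info.
    print(doc.extra_info["chunk_type"], doc.text[:80])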
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import MODEL_WRAPPERS
def is_model_wrapper(model):
"""Check if a module is a model wrapper.
    The following 4 models in MMEngine (and their subclasses) are regarded as
    model wrappers: DataParallel, DistributedDataParallel,
    MMDataParallel, MMDistributedDataParallel. You may add your own
model wrapper by registering it to ``mmengine.registry.MODEL_WRAPPERS``.
Args:
model (nn.Module): The model to be checked.
Returns:
bool: True if the input model is a model wrapper.
"""
model_wrappers = tuple(MODEL_WRAPPERS.module_dict.values())
return isinstance(model, model_wrappers)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.registry import MODEL_WRAPPERS
def is_model_wrapper(model):
"""Check if a module is a model wrapper.
    The following 4 models in MMEngine (and their subclasses) are regarded as
    model wrappers: DataParallel, DistributedDataParallel,
    MMDataParallel, MMDistributedDataParallel. You may add your own
model wrapper by registering it to mmengine.registry.MODEL_WRAPPERS.
Args:
model (nn.Module): The model to be checked.
Returns:
bool: True if the input model is a model wrapper.
"""
model_wrappers = tuple(MODEL_WRAPPERS.module_dict.values())
return isinstance(model, model_wrappers)
|
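Because is_model_wrapper simply checks membership in MODEL_WRAPPERS, recognizing a custom wrapper is a one-decorator change. A hedged sketch (assumes mmengine and torch; the import location of is_model_wrapper may differ across mmengine versions):

import torch.nn as nn
from mmengine.model import is_model_wrapper  # assumed export location
from mmengine.registry import MODEL_WRAPPERS

@MODEL_WRAPPERS.register_module()
class MyWrapper(nn.Module):
    """Toy wrapper that just holds an inner module."""

    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def forward(self, x):
        return self.module(x)

wrapped = MyWrapper(nn.Linear(4, 2))
print(is_model_wrapper(wrapped))  # True, because MyWrapper is now registered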
import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
        :param file_path: path to an audio file. If file_path is a string, open the file by
            that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
from pydub import AudioSegment # type: ignore
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import warnings
from abc import ABC
from typing import Any, BinaryIO, Dict, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save(
self: 'T',
file_path: Union[str, BinaryIO],
format: str = 'wav',
frame_rate: int = 44100,
sample_width: int = 2,
pydub_args: Dict[str, Any] = {},
) -> None:
"""
Save audio tensor to an audio file. Mono/stereo is preserved.
        :param file_path: path to an audio file. If file_path is a string, open the file by
            that name, otherwise treat it as a file-like object.
:param format: format for the audio file ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)
:param frame_rate: sampling frequency
:param sample_width: sample width in bytes
:param pydub_args: dictionary of additional arguments for pydub.AudioSegment.export function
"""
from pydub import AudioSegment # type: ignore
comp_backend = self.get_comp_backend()
channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
segment = AudioSegment(
self.to_bytes(),
frame_rate=frame_rate,
sample_width=sample_width,
channels=channels,
)
segment.export(file_path, format=format, **pydub_args)
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
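The to_bytes/save pair above amounts to float-to-int16 conversion followed by a pydub export. A standalone sketch of that path (assumes numpy and pydub; non-wav formats additionally need ffmpeg):

import numpy as np
from pydub import AudioSegment

MAX_INT_16 = 2**15
frame_rate = 44100

# One second of a 440 Hz sine tone, float values in [-1, 1], mono
t = np.linspace(0, 1, frame_rate, endpoint=False)
tensor = 0.5 * np.sin(2 * np.pi * 440 * t)

raw = (tensor * MAX_INT_16).astype('<h').tobytes()  # little-endian int16, as in to_bytes()
segment = AudioSegment(raw, frame_rate=frame_rate, sample_width=2, channels=1)
segment.export("tone.wav", format="wav")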
__version__ = '0.13.20'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.19'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
__version__ = "3.0.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from .datasets import SentencesDataset, ParallelSentencesDataset
from .LoggingHandler import LoggingHandler
from .SentenceTransformer import SentenceTransformer
from .similarity_functions import SimilarityFunction
from .readers import InputExample
from .cross_encoder.CrossEncoder import CrossEncoder
from .trainer import SentenceTransformerTrainer
from .training_args import SentenceTransformerTrainingArguments
from .model_card import SentenceTransformerModelCardData
from .quantization import quantize_embeddings
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for compatibility with PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
        # for compatibility with PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
|
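The padding-and-striding arithmetic in __iter__ is easiest to see on a toy example; the following pure-Python sketch reproduces it without torch or any distributed setup:

import math

indices = list(range(10))  # a dataset of 10 samples
num_replicas = 4
num_samples = math.ceil(len(indices) / num_replicas)  # 3 samples per rank
total_size = num_samples * num_replicas               # 12, so 2 padding samples

# Repeat the index list until it covers total_size, then cut (same as the sampler above)
padded = (indices * math.ceil(total_size / len(indices)))[:total_size]
assert len(padded) == total_size

for rank in range(num_replicas):
    shard = padded[rank:total_size:num_replicas]  # rank 1 gets [1, 5, 9], etc.
    print(rank, shard)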
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
batch_data_samples = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True,
device='cuda')['data_samples']
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from unittest import TestCase
import torch
from parameterized import parameterized
from mmdet.models.roi_heads import StandardRoIHead # noqa
from mmdet.registry import MODELS
from mmdet.testing import demo_mm_inputs, demo_mm_proposals, get_roi_head_cfg
class TestCascadeRoIHead(TestCase):
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_init(self, cfg_file):
"""Test init standard RoI head."""
# Normal Cascade Mask R-CNN RoI head
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
assert roi_head.with_bbox
assert roi_head.with_mask
@parameterized.expand(
['cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'])
def test_cascade_roi_head_loss(self, cfg_file):
"""Tests standard roi head loss when truth is empty and non-empty."""
if not torch.cuda.is_available():
            # RoI pooling is only supported on GPU
return unittest.skip('test requires GPU and torch+cuda')
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
roi_head_cfg = get_roi_head_cfg(cfg_file)
roi_head = MODELS.build(roi_head_cfg)
roi_head = roi_head.cuda()
feats = []
for i in range(len(roi_head_cfg.bbox_roi_extractor.featmap_strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)),
s // (2**(i + 2))).to(device='cuda'))
feats = tuple(feats)
# When truth is non-empty then both cls, box, and mask loss
# should be nonzero for random inputs
img_shape_list = [(3, s, s) for _ in img_metas]
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[1],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
# When there is no truth, the cls loss should be nonzero but
# there should be no box and mask loss.
proposal_list = demo_mm_proposals(img_shape_list, 100, device='cuda')
packed_inputs = demo_mm_inputs(
batch_size=1,
image_shapes=[(3, s, s)],
num_items=[0],
num_classes=4,
with_mask=True)
batch_data_samples = []
for i in range(len(packed_inputs)):
batch_data_samples.append(
packed_inputs[i]['data_sample'].to(device='cuda'))
out = roi_head.loss(feats, proposal_list, batch_data_samples)
for name, value in out.items():
if 'loss_cls' in name:
self.assertGreaterEqual(
value.sum(), 0, msg='loss should be non-zero')
elif 'loss_bbox' in name or 'loss_mask' in name:
self.assertEqual(value.sum(), 0)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_compressors.flashrank_rerank import (
FlashrankRerank,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlashrankRerank",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_compressors.flashrank_rerank import (
FlashrankRerank,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlashrankRerank",
]
|
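The shim above relies on module-level __getattr__ (PEP 562). Without the langchain create_importer helper, the same pattern in a single module looks roughly like this (the lookup target is the one from the shim; the warning text is illustrative):

# deprecated_shim.py, a minimal PEP 562 deprecation shim
import importlib
import warnings
from typing import Any

DEPRECATED_LOOKUP = {
    "FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}

def __getattr__(name: str) -> Any:
    """Look up attributes dynamically, warning on deprecated names."""
    if name in DEPRECATED_LOOKUP:
        target = DEPRECATED_LOOKUP[name]
        warnings.warn(f"{name} should now be imported from {target}", DeprecationWarning, stacklevel=2)
        return getattr(importlib.import_module(target), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")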
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
resize_cfg=dict(type='Resize', keep_ratio=True)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
train_cfg = dict(max_epochs=24)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=24,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
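Both config variants above rely on _base_ inheritance: only the keys that differ from the parent file are declared, and mmengine merges the rest at load time. A hedged sketch of inspecting the merged result (the config path is illustrative and depends on the local mmdetection checkout):

from mmengine.config import Config

# Path is hypothetical; point it at the actual config file in your mmdetection tree.
cfg = Config.fromfile("configs/fcos/fcos_x101_config.py")
# After merging with _base_, inherited and overridden keys are both visible.
print(cfg.model.backbone.type, cfg.model.backbone.depth)
print(cfg.train_cfg.max_epochs)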
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, i.e. 1.19GB for 10 million embeddings.
"""
import json
import os
import time
import faiss
import numpy as np
from usearch.index import Index
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Apply some default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
indices = (-scores).argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
    # Prompt for more queries
query = input("Please enter a question: ")
|
"""
This script showcases a recommended approach to perform semantic search using quantized embeddings with FAISS and usearch.
In particular, it uses binary search with int8 rescoring. The binary search is highly efficient, and its index can be kept
in memory even for massive datasets: it takes (num_dimensions * num_documents / 8) bytes, i.e. 1.19GB for 10 million embeddings.
"""
import json
import os
import time
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings
from datasets import load_dataset
import faiss
from usearch.index import Index
# We use usearch as it can efficiently load int8 vectors from disk.
# Load the model
# NOTE: Because we are only comparing questions here, we will use the "query" prompt for everything.
# Normally you don't use this prompt for documents, but only for the queries
model = SentenceTransformer(
"mixedbread-ai/mxbai-embed-large-v1",
prompts={"query": "Represent this sentence for searching relevant passages: "},
default_prompt_name="query",
)
# Load a corpus with texts
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# Apply some default query
query = "How do I become a good programmer?"
# Try to load the precomputed binary and int8 indices
if os.path.exists("quora_faiss_ubinary.index"):
binary_index: faiss.IndexBinaryFlat = faiss.read_index_binary("quora_faiss_ubinary.index")
int8_view = Index.restore("quora_usearch_int8.index", view=True)
else:
# Encode the corpus using the full precision
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# Convert the embeddings to "ubinary" for efficient FAISS search
ubinary_embeddings = quantize_embeddings(full_corpus_embeddings, "ubinary")
binary_index = faiss.IndexBinaryFlat(1024)
binary_index.add(ubinary_embeddings)
faiss.write_index_binary(binary_index, "quora_faiss_ubinary.index")
# Convert the embeddings to "int8" for efficiently loading int8 indices with usearch
int8_embeddings = quantize_embeddings(full_corpus_embeddings, "int8")
index = Index(ndim=1024, metric="ip", dtype="i8")
index.add(np.arange(len(int8_embeddings)), int8_embeddings)
index.save("quora_usearch_int8.index")
del index
# Load the int8 index as a view, which does not cost any memory
int8_view = Index.restore("quora_usearch_int8.index", view=True)
def search(query, top_k: int = 10, rescore_multiplier: int = 4):
# 1. Embed the query as float32
start_time = time.time()
query_embedding = model.encode(query)
embed_time = time.time() - start_time
# 2. Quantize the query to ubinary
start_time = time.time()
query_embedding_ubinary = quantize_embeddings(query_embedding.reshape(1, -1), "ubinary")
quantize_time = time.time() - start_time
# 3. Search the binary index
start_time = time.time()
_scores, binary_ids = binary_index.search(query_embedding_ubinary, top_k * rescore_multiplier)
binary_ids = binary_ids[0]
search_time = time.time() - start_time
# 4. Load the corresponding int8 embeddings
start_time = time.time()
int8_embeddings = int8_view[binary_ids].astype(int)
load_time = time.time() - start_time
# 5. Rescore the top_k * rescore_multiplier using the float32 query embedding and the int8 document embeddings
start_time = time.time()
scores = query_embedding @ int8_embeddings.T
rescore_time = time.time() - start_time
# 6. Sort the scores and return the top_k
start_time = time.time()
indices = (-scores).argsort()[:top_k]
top_k_indices = binary_ids[indices]
top_k_scores = scores[indices]
sort_time = time.time() - start_time
return (
top_k_scores.tolist(),
top_k_indices.tolist(),
{
"Embed Time": f"{embed_time:.4f} s",
"Quantize Time": f"{quantize_time:.4f} s",
"Search Time": f"{search_time:.4f} s",
"Load Time": f"{load_time:.4f} s",
"Rescore Time": f"{rescore_time:.4f} s",
"Sort Time": f"{sort_time:.4f} s",
"Total Retrieval Time": f"{quantize_time + search_time + load_time + rescore_time + sort_time:.4f} s",
},
)
while True:
scores, indices, timings = search(query)
# Output the results
print(f"Timings:\n{json.dumps(timings, indent=2)}")
print(f"Query: {query}")
for score, index in zip(scores, indices):
print(f"(Score: {score:.4f}) {corpus[index]}")
print("")
    # Prompt for more queries
query = input("Please enter a question: ")
|
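Conceptually, the "ubinary" quantization used above is sign thresholding plus bit packing, and the coarse FAISS stage is a Hamming-distance search. A numpy-only sketch of that idea (not the script's actual code path, and rescoring here uses float embeddings rather than int8):

import numpy as np

rng = np.random.default_rng(0)
docs = rng.normal(size=(1000, 1024)).astype(np.float32)  # toy corpus embeddings
query = rng.normal(size=(1024,)).astype(np.float32)

# "ubinary": one bit per dimension, packed 8 dimensions per byte -> 128 bytes per document
docs_ubinary = np.packbits(docs > 0, axis=1)
query_ubinary = np.packbits(query > 0)

# Hamming distance: XOR the packed codes, then count set bits via unpackbits
xor = np.bitwise_xor(docs_ubinary, query_ubinary)
hamming = np.unpackbits(xor, axis=1).sum(axis=1)

top_k, rescore_multiplier = 10, 4
candidate_ids = np.argsort(hamming)[: top_k * rescore_multiplier]  # oversample for rescoring
rescored = docs[candidate_ids] @ query
best = candidate_ids[np.argsort(-rescored)[:top_k]]
print(best)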
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(
self, q: 'QdrantArrayType', limit: int = 10, filter: Optional[Dict] = None
):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=filter,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'QdrantArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be used in Qdrant.
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
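# Usage sketch (illustrative, not part of this mixin): assuming Qdrant's standard
# payload filter format, a pre-filter passed through `filter` could look like:
#
#     color_filter = {
#         "must": [{"key": "color", "match": {"value": "red"}}]
#     }
#     results = da._find(query_embedding, limit=5, filter=color_filter)
#
# The payload key ("color") and `da` / `query_embedding` are hypothetical and depend
# on how the documents were stored.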
|
from abc import abstractmethod
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
)
from qdrant_client.http.models.models import Distance
from .... import Document, DocumentArray
from ....math import ndarray
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
import numpy as np
from qdrant_client import QdrantClient
QdrantArrayType = TypeVar(
'QdrantArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
@property
@abstractmethod
def client(self) -> 'QdrantClient':
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def serialize_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def distance(self) -> 'Distance':
raise NotImplementedError()
def _find_similar_vectors(self, q: 'QdrantArrayType', limit=10):
query_vector = self._map_embedding(q)
search_result = self.client.search(
self.collection_name,
query_vector=query_vector,
query_filter=None,
search_params=None,
top=limit,
append_payload=['_serialized'],
)
docs = []
for hit in search_result:
doc = Document.from_base64(
hit.payload['_serialized'], **self.serialize_config
)
doc.scores[f'{self.distance.lower()}_similarity'] = NamedScore(
value=hit.score
)
docs.append(doc)
return DocumentArray(docs)
def _find(
self, query: 'QdrantArrayType', limit: int = 10, **kwargs
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be used in Qdrant.
:param limit: number of retrieved items
:return: a list of DocumentArrays containing the closest Document objects for each of the queries in `query`.
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit)
closest_docs.append(da)
return closest_docs
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class BinaryCrossEntropyLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
activation_fct: nn.Module = nn.Identity(),
pos_weight: Tensor | None = None,
**kwargs,
) -> None:
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.pos_weight = pos_weight
self.bce_with_logits_loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight, **kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"BinaryCrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.bce_with_logits_loss(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
"pos_weight": self.pos_weight if self.pos_weight is None else self.pos_weight.item(),
}
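# Minimal usage sketch (illustrative; the checkpoint name and texts are placeholders):
#
#     import torch
#     from sentence_transformers.cross_encoder import CrossEncoder
#
#     model = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2", num_labels=1)
#     loss_fct = BinaryCrossEntropyLoss(model)
#     queries = ["how do I bake bread?", "how do I bake bread?"]
#     passages = ["Preheat the oven to 220C and ...", "The 1969 moon landing was ..."]
#     labels = torch.tensor([1.0, 0.0])
#     loss = loss_fct([queries, passages], labels)  # inputs = [first_column, second_column]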
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.util import fullname
class BinaryCrossEntropyLoss(nn.Module):
def __init__(
self,
model: CrossEncoder,
activation_fct: nn.Module = nn.Identity(),
pos_weight: Tensor | None = None,
**kwargs,
) -> None:
super().__init__()
self.model = model
self.activation_fct = activation_fct
self.bce_with_logits_loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight, **kwargs)
if self.model.num_labels != 1:
raise ValueError(
f"{self.__class__.__name__} expects a model with 1 output label, "
f"but got a model with {self.model.num_labels} output labels."
)
def forward(self, inputs: list[list[str]], labels: Tensor) -> Tensor:
if len(inputs) != 2:
raise ValueError(
f"BinaryCrossEntropyLoss expects a dataset with two non-label columns, but got a dataset with {len(inputs)} columns."
)
pairs = list(zip(inputs[0], inputs[1]))
tokens = self.model.tokenizer(
pairs,
padding=True,
truncation=True,
return_tensors="pt",
)
tokens.to(self.model.device)
logits = self.model(**tokens)[0].view(-1)
logits = self.activation_fct(logits)
loss = self.bce_with_logits_loss(logits, labels.float())
return loss
def get_config_dict(self):
return {
"activation_fct": fullname(self.activation_fct),
"pos_weight": self.bce_with_logits_loss.pos_weight.item(),
}
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
_WARN_ABOUT_BETA_TRANSFORMS = True
_BETA_TRANSFORMS_WARNING = (
"The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
"While we do not expect major breaking changes, some APIs may still change "
"according to user feedback. Please submit any feedback you may have in "
"this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
"check out https://github.com/pytorch/vision/issues/7319 to learn more about "
"the APIs that we suspect might involve future changes. "
"You can silence this warning by calling torchvision.disable_beta_transforms_warning()."
)
def disable_beta_transforms_warning():
global _WARN_ABOUT_BETA_TRANSFORMS
_WARN_ABOUT_BETA_TRANSFORMS = False
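# Usage sketch (illustrative): silence the beta-transforms warning before importing
# the v2 transforms namespace.
#
#     import torchvision
#     torchvision.disable_beta_transforms_warning()
#     import torchvision.transforms.v2 as transforms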
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
_WARN_ABOUT_BETA_TRANSFORMS = True
_BETA_TRANSFORMS_WARNING = (
"The torchvision.datapoints and torchvision.transforms.v2 namespaces are still Beta. "
"While we do not expect major breaking changes, some APIs may still change "
"according to user feedback. Please submit any feedback you may have in "
"this issue: https://github.com/pytorch/vision/issues/6753, and you can also "
"check out https://github.com/pytorch/vision/issues/7319 to learn more about "
"the APIs that we suspect might involve future changes. "
"You can silence this warning by calling torchvision.disable_beta_transforms_warning()."
)
def disable_beta_transforms_warning():
global _WARN_ABOUT_BETA_TRANSFORMS
_WARN_ABOUT_BETA_TRANSFORMS = False
|
from __future__ import annotations
import torch
from torch import nn
# TODO: SAVING LOADING with config.json
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
def __init__(
self,
pooling_strategy: str = "max",
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in ["max", "sum"]:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the mofrom ...models.Pooling import Pooling
del.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sentence_embedding": pooled_scores}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the SPLADE embeddings (vocabulary size)"""
# This will be set by the MLMTransformer
return self.auto_model.config.vocab_size if hasattr(self, "auto_model") else None
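# Minimal self-contained sketch of the pooling step on dummy MLM logits (the shapes
# and values below are illustrative; real logits come from an MLM head).
if __name__ == "__main__":
    dummy_logits = torch.randn(2, 5, 10)  # (batch_size, seq_length, vocab_size)
    pooling = SpladePooling(pooling_strategy="max")
    pooled = pooling({"mlm_logits": dummy_logits})["sentence_embedding"]
    # log1p(relu(x)) is non-negative, so the pooled vector contains many exact zeros
    print(pooled.shape)  # torch.Size([2, 10])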
|
from __future__ import annotations
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
def __init__(
self,
pooling_strategy: str = "max",
) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in ["max", "sum"]:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the mofrom ...models.Pooling import Pooling
del.
Args:
features: Dictionary containing input features with 'mlm_logits' key
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["mlm_logits"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
return {"sparse_embedding": pooled_scores}
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the SPLADE embeddings (vocabulary size)"""
# This will be set by the MLMTransformer
return self.auto_model.config.vocab_size if hasattr(self, "auto_model") else None
|
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio._internal import download_url_to_file
from torchaudio.datasets.utils import _extract_zip
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "781f12f4406ed36ed27ae3bce55da47ba176e2d8bae67319e389e07b2c9bd769"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""*Device Recorded VCTK (Small subset version)* :cite:`Sarfjoo2018DeviceRV` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url_to_file(url, archive, hash_prefix=_CHECKSUM)
_extract_zip(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Clean waveform
int:
Sample rate of the clean waveform
Tensor:
Noisy waveform
int:
Sample rate of the noisy waveform
str:
Speaker ID
str:
Utterance ID
str:
Source
int:
Channel ID
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
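# Usage sketch (paths are illustrative; downloading requires network access):
#
#     dataset = DR_VCTK("./data", subset="test", download=True)
#     (waveform_clean, sr_clean, waveform_noisy, sr_noisy,
#      speaker_id, utterance_id, source, channel_id) = dataset[0]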
|
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import _extract_zip
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "781f12f4406ed36ed27ae3bce55da47ba176e2d8bae67319e389e07b2c9bd769"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""*Device Recorded VCTK (Small subset version)* :cite:`Sarfjoo2018DeviceRV` dataset.
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url_to_file(url, archive, hash_prefix=_CHECKSUM)
_extract_zip(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Clean waveform
int:
Sample rate of the clean waveform
Tensor:
Noisy waveform
int:
Sample rate of the noisy waveform
str:
Speaker ID
str:
Utterance ID
str:
Source
int:
Channel ID
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # Set the calendar to month 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now().replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now().replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now().replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # Set the calendar to month 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now().replace(month=month2)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now().replace(month=month1)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now().replace(month=month2)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: copied from mktestdocs.__main__, with keyword_ignore support added.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle', 'jac'])
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
import pathlib
import pytest
from mktestdocs import grab_code_blocks
from mktestdocs.__main__ import _executors, check_raw_string
file_to_skip = ['fastAPI', 'jina']
def check_raw_file_full(raw, lang="python", keyword_ignore=[]):
if lang not in _executors:
raise LookupError(
f"{lang} is not a supported language to check\n"
"\tHint: you can add support for any language by using register_executor"
)
executor = _executors[lang]
all_code = ""
add_code_block = True
for b in grab_code_blocks(raw, lang=lang):
add_code_block = True
for keyword in keyword_ignore:
if keyword in b:
add_code_block = False
break
if add_code_block:
all_code = f"{all_code}\n{b}"
executor(all_code)
def check_md_file(fpath, memory=False, lang="python", keyword_ignore=[]):
"""
    NOTE: copied from mktestdocs.__main__, with keyword_ignore support added.
Given a markdown file, parse the contents for python code blocks
and check that each independent block does not cause an error.
Arguments:
fpath: path to markdown file
memory: whether or not previous code-blocks should be remembered
"""
text = pathlib.Path(fpath).read_text()
if not memory:
check_raw_string(text, lang=lang)
else:
check_raw_file_full(text, lang=lang, keyword_ignore=keyword_ignore)
files_to_check = [
*list(pathlib.Path('docs/user_guide').glob('**/*.md')),
*list(pathlib.Path('docs/data_types').glob('**/*.md')),
]
file_to_remove = []
for file in files_to_check:
for fn in file_to_skip:
if fn in str(file):
file_to_remove.append(file)
for file in file_to_remove:
files_to_check.remove(file)
@pytest.mark.parametrize('fpath', files_to_check, ids=str)
def test_files_good(fpath):
check_md_file(fpath=fpath, memory=True, keyword_ignore=['pickle'])
def test_readme():
check_md_file(
fpath='README.md', memory=True, keyword_ignore=['tensorflow', 'fastapi', 'push']
)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.dask.get_client_workers(client)
n_workers = len(workers)
args = await get_rabit_args(client, n_workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
"""Copyright 2024, XGBoost contributors"""
import pytest
from distributed import Client, Scheduler, Worker
from distributed.utils_test import gen_cluster
from xgboost import testing as tm
from xgboost.testing.dask import check_external_memory, get_rabit_args
@pytest.mark.parametrize("is_qdm", [True, False])
@gen_cluster(client=True)
async def test_external_memory(
client: Client, s: Scheduler, a: Worker, b: Worker, is_qdm: bool
) -> None:
workers = tm.get_client_workers(client)
n_workers = len(workers)
args = await get_rabit_args(client, n_workers)
futs = client.map(
check_external_memory,
range(n_workers),
n_workers=n_workers,
device="cpu",
comm_args=args,
is_qdm=is_qdm,
)
await client.gather(futs)
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
]
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Callback handlers** allow listening to events in LangChain.
**Class hierarchy:**
.. code-block::
BaseCallbackHandler --> <name>CallbackHandler # Example: AimCallbackHandler
"""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.callbacks.base import (
AsyncCallbackHandler,
BaseCallbackHandler,
BaseCallbackManager,
CallbackManagerMixin,
Callbacks,
ChainManagerMixin,
LLMManagerMixin,
RetrieverManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain_core.callbacks.file import FileCallbackHandler
from langchain_core.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainGroup,
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForLLMRun,
AsyncCallbackManagerForRetrieverRun,
AsyncCallbackManagerForToolRun,
AsyncParentRunManager,
AsyncRunManager,
BaseRunManager,
CallbackManager,
CallbackManagerForChainGroup,
CallbackManagerForChainRun,
CallbackManagerForLLMRun,
CallbackManagerForRetrieverRun,
CallbackManagerForToolRun,
ParentRunManager,
RunManager,
adispatch_custom_event,
dispatch_custom_event,
)
from langchain_core.callbacks.stdout import StdOutCallbackHandler
from langchain_core.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_core.callbacks.usage import (
UsageMetadataCallbackHandler,
get_usage_metadata_callback,
)
__all__ = [
"dispatch_custom_event",
"adispatch_custom_event",
"RetrieverManagerMixin",
"LLMManagerMixin",
"ChainManagerMixin",
"ToolManagerMixin",
"Callbacks",
"CallbackManagerMixin",
"RunManagerMixin",
"BaseCallbackHandler",
"AsyncCallbackHandler",
"BaseCallbackManager",
"BaseRunManager",
"RunManager",
"ParentRunManager",
"AsyncRunManager",
"AsyncParentRunManager",
"CallbackManagerForLLMRun",
"AsyncCallbackManagerForLLMRun",
"CallbackManagerForChainRun",
"AsyncCallbackManagerForChainRun",
"CallbackManagerForToolRun",
"AsyncCallbackManagerForToolRun",
"CallbackManagerForRetrieverRun",
"AsyncCallbackManagerForRetrieverRun",
"CallbackManager",
"CallbackManagerForChainGroup",
"AsyncCallbackManager",
"AsyncCallbackManagerForChainGroup",
"StdOutCallbackHandler",
"StreamingStdOutCallbackHandler",
"FileCallbackHandler",
"UsageMetadataCallbackHandler",
"get_usage_metadata_callback",
]
_dynamic_imports = {
"AsyncCallbackHandler": "base",
"BaseCallbackHandler": "base",
"BaseCallbackManager": "base",
"CallbackManagerMixin": "base",
"Callbacks": "base",
"ChainManagerMixin": "base",
"LLMManagerMixin": "base",
"RetrieverManagerMixin": "base",
"RunManagerMixin": "base",
"ToolManagerMixin": "base",
"FileCallbackHandler": "file",
"AsyncCallbackManager": "manager",
"AsyncCallbackManagerForChainGroup": "manager",
"AsyncCallbackManagerForChainRun": "manager",
"AsyncCallbackManagerForLLMRun": "manager",
"AsyncCallbackManagerForRetrieverRun": "manager",
"AsyncCallbackManagerForToolRun": "manager",
"AsyncParentRunManager": "manager",
"AsyncRunManager": "manager",
"BaseRunManager": "manager",
"CallbackManager": "manager",
"CallbackManagerForChainGroup": "manager",
"CallbackManagerForChainRun": "manager",
"CallbackManagerForLLMRun": "manager",
"CallbackManagerForRetrieverRun": "manager",
"CallbackManagerForToolRun": "manager",
"ParentRunManager": "manager",
"RunManager": "manager",
"adispatch_custom_event": "manager",
"dispatch_custom_event": "manager",
"StdOutCallbackHandler": "stdout",
"StreamingStdOutCallbackHandler": "streaming_stdout",
"UsageMetadataCallbackHandler": "usage",
"get_usage_metadata_callback": "usage",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent # type: ignore[name-defined]
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './retinanet_r50-caffe_fpn_1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
# Copyright (c) OpenMMLab. All rights reserved.
from .cityscapes_metric import CityScapesMetric
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
__all__ = ['CityScapesMetric', 'CocoMetric', 'CocoPanopticMetric']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .coco_metric import CocoMetric
from .coco_panoptic_metric import CocoPanopticMetric
__all__ = ['CocoMetric', 'CocoPanopticMetric']
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_kl_wan import AutoencoderKLWan
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos
transcript = token_processor(hypos[0][0], lstrip=True)
print(transcript, end="\r", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0][0]))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH, RNNTBundle
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis[0], lstrip=False)
print(transcript, end="", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0][0]))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import subprocess
import librosa
import pytest
from jina import Document, DocumentArray, Flow
from ... import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow(return_results=True).add(uses=AudioCLIPEncoder) as f:
resp = f.post(on='/test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sr = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio, tags={'sample_rate': sr})])
with Flow().add(uses=AudioCLIPEncoder) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = 'Copyright (c) 2020-2021 Jina AI Limited. All rights reserved.'
__license__ = 'Apache-2.0'
import os
import librosa
from jina import Flow, Document, DocumentArray
from ... import AudioCLIPEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sr = librosa.load(os.path.join(cur_dir, '../test_data/sample.mp3'))
doc = DocumentArray([Document(blob=x_audio, tags={'sample_rate': sr})])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
assert responses[0].docs[0].embedding.shape == (1024,)
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the tokenizer.
return_tensors (`bool`, *optional*):
If `True`, returns a tensor according to the specified framework, otherwise returns a list.""",
)
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args: Union[str, List[str]], **kwargs: Any) -> Union[Any, List[Any]]:
"""
        Extract the features of the input text(s).
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
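# Usage sketch (illustrative; the checkpoint name and kwargs are examples, not part of this
# module): tokenizer options flow through `tokenize_kwargs`, and `return_tensors` switches
# the output from nested lists to a framework tensor, e.g.
#
#   extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
#   feats = extractor("Some text", tokenize_kwargs={"truncation": True}, return_tensors=True)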
|
from typing import Dict
from ..utils import add_end_docstrings
from .base import GenericTensor, Pipeline, build_pipeline_init_args
@add_end_docstrings(
build_pipeline_init_args(has_tokenizer=True, supports_binary_output=False),
r"""
tokenize_kwargs (`dict`, *optional*):
Additional dictionary of keyword arguments passed along to the tokenizer.
return_tensors (`bool`, *optional*):
If `True`, returns a tensor according to the specified framework, otherwise returns a list.""",
)
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
Example:
```python
>>> from transformers import pipeline
>>> extractor = pipeline(model="google-bert/bert-base-uncased", task="feature-extraction")
>>> result = extractor("This is a simple test.", return_tensors=True)
>>> result.shape # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input string.
torch.Size([1, 8, 768])
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
[huggingface.co/models](https://huggingface.co/models).
"""
def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
if tokenize_kwargs is None:
tokenize_kwargs = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
)
tokenize_kwargs["truncation"] = truncation
preprocess_params = tokenize_kwargs
postprocess_params = {}
if return_tensors is not None:
postprocess_params["return_tensors"] = return_tensors
return preprocess_params, {}, postprocess_params
def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
model_inputs = self.tokenizer(inputs, return_tensors=self.framework, **tokenize_kwargs)
return model_inputs
def _forward(self, model_inputs):
model_outputs = self.model(**model_inputs)
return model_outputs
def postprocess(self, model_outputs, return_tensors=False):
# [0] is the first available tensor, logits or last_hidden_state.
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (`str` or `List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of `float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs)
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as bb:
bb.functions["io.TextIOWrapper.read"].can_block_in(
"langchain/__init__.py",
"<module>",
)
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py",
"aconfig_with_context",
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py",
"_default_retry_config",
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py",
"_get_cached_module_attributes",
)
yield
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
parser.addoption(
"--community",
action="store_true",
dest="community",
default=False,
help="enable running unite tests that require community",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if not config.getoption("--community"):
skip_community = pytest.mark.skip(reason="need --community option to run")
for item in items:
if "community" in item.keywords:
item.add_marker(skip_community)
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`"),
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test."),
)
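# Illustrative invocations (assuming this conftest sits at the root of the test suite):
#   pytest --only-core       # skip every test marked @pytest.mark.requires(...)
#   pytest --only-extended   # run only marker-gated tests; fail if a required package is missing
#   pytest --community       # additionally run tests carrying the "community" keyword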
|
"""Configuration for unit tests."""
from collections.abc import Iterator, Sequence
from importlib import util
import pytest
from blockbuster import blockbuster_ctx
from pytest import Config, Function, Parser
@pytest.fixture(autouse=True)
def blockbuster() -> Iterator[None]:
with blockbuster_ctx("langchain") as bb:
bb.functions["io.TextIOWrapper.read"].can_block_in(
"langchain/__init__.py", "<module>"
)
for func in ["os.stat", "os.path.abspath"]:
(
bb.functions[func]
.can_block_in("langchain_core/runnables/base.py", "__repr__")
.can_block_in(
"langchain_core/beta/runnables/context.py", "aconfig_with_context"
)
)
for func in ["os.stat", "io.TextIOWrapper.read"]:
bb.functions[func].can_block_in(
"langsmith/client.py", "_default_retry_config"
)
for bb_function in bb.functions.values():
bb_function.can_block_in(
"freezegun/api.py", "_get_cached_module_attributes"
)
yield
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
parser.addoption(
"--community",
action="store_true",
dest="community",
default=False,
help="enable running unite tests that require community",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if not config.getoption("--community"):
skip_community = pytest.mark.skip(reason="need --community option to run")
for item in items:
if "community" in item.keywords:
item.add_marker(skip_community)
if only_extended and only_core:
msg = "Cannot specify both `--only-extended` and `--only-core`."
raise ValueError(msg)
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
|
import pytest
from backend.util.service import (
AppService,
AppServiceClient,
endpoint_to_async,
expose,
get_service_client,
)
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
class ServiceTestClient(AppServiceClient):
@classmethod
def get_service_type(cls):
return ServiceTest
add = ServiceTest.add
subtract = ServiceTest.subtract
fun_with_async = ServiceTest.fun_with_async
add_async = endpoint_to_async(ServiceTest.add)
subtract_async = endpoint_to_async(ServiceTest.subtract)
@pytest.mark.asyncio(loop_scope="session")
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTestClient)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
assert await client.add_async(5, 3) == 8
assert await client.subtract_async(10, 4) == 6
|
import pytest
from backend.util.service import AppService, expose, get_service_client
TEST_SERVICE_PORT = 8765
class ServiceTest(AppService):
def __init__(self):
super().__init__()
def cleanup(self):
pass
@classmethod
def get_port(cls) -> int:
return TEST_SERVICE_PORT
@expose
def add(self, a: int, b: int) -> int:
return a + b
@expose
def subtract(self, a: int, b: int) -> int:
return a - b
@expose
def fun_with_async(self, a: int, b: int) -> int:
async def add_async(a: int, b: int) -> int:
return a + b
return self.run_and_wait(add_async(a, b))
@pytest.mark.asyncio(loop_scope="session")
async def test_service_creation(server):
with ServiceTest():
client = get_service_client(ServiceTest)
assert client.add(5, 3) == 8
assert client.subtract(10, 4) == 6
assert client.fun_with_async(5, 3) == 8
|
import numpy as np
from numpy.fft import __all__ as fft_all
from numpy.fft import fft2, ifft2, irfft2, rfft2
from .._internal import get_xp
from ..common import _fft
fft = get_xp(np)(_fft.fft)
ifft = get_xp(np)(_fft.ifft)
fftn = get_xp(np)(_fft.fftn)
ifftn = get_xp(np)(_fft.ifftn)
rfft = get_xp(np)(_fft.rfft)
irfft = get_xp(np)(_fft.irfft)
rfftn = get_xp(np)(_fft.rfftn)
irfftn = get_xp(np)(_fft.irfftn)
hfft = get_xp(np)(_fft.hfft)
ihfft = get_xp(np)(_fft.ihfft)
fftfreq = get_xp(np)(_fft.fftfreq)
rfftfreq = get_xp(np)(_fft.rfftfreq)
fftshift = get_xp(np)(_fft.fftshift)
ifftshift = get_xp(np)(_fft.ifftshift)
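# Note (descriptive, based on the usual behaviour of array-api-compat's get_xp decorator):
# get_xp(np) returns a decorator that binds the wrapped helper's xp argument to numpy, so
# e.g. fft(x) here is expected to dispatch to _fft.fft(x, xp=np).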
__all__ = ["rfft2", "irfft2", "fft2", "ifft2"]
__all__ += _fft.__all__
def __dir__() -> list[str]:
return __all__
del get_xp
del np
del fft_all
del _fft
|
from numpy.fft import * # noqa: F403
from numpy.fft import __all__ as fft_all
from ..common import _fft
from .._internal import get_xp
import numpy as np
fft = get_xp(np)(_fft.fft)
ifft = get_xp(np)(_fft.ifft)
fftn = get_xp(np)(_fft.fftn)
ifftn = get_xp(np)(_fft.ifftn)
rfft = get_xp(np)(_fft.rfft)
irfft = get_xp(np)(_fft.irfft)
rfftn = get_xp(np)(_fft.rfftn)
irfftn = get_xp(np)(_fft.irfftn)
hfft = get_xp(np)(_fft.hfft)
ihfft = get_xp(np)(_fft.ihfft)
fftfreq = get_xp(np)(_fft.fftfreq)
rfftfreq = get_xp(np)(_fft.rfftfreq)
fftshift = get_xp(np)(_fft.fftshift)
ifftshift = get_xp(np)(_fft.ifftshift)
__all__ = fft_all + _fft.__all__
del get_xp
del np
del fft_all
del _fft
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
if backend not in ("tensorflow", "jax", "torch", "numpy", "openvino"):
raise ValueError(
"Available backends are ('tensorflow', 'jax', 'torch', "
f"'numpy' and 'openvino'). Received: backend={backend}"
)
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
module = importlib.import_module("keras.src.backend.tensorflow")
return getattr(module, name)
if self._backend == "jax":
module = importlib.import_module("keras.src.backend.jax")
return getattr(module, name)
if self._backend == "torch":
module = importlib.import_module("keras.src.backend.torch")
return getattr(module, name)
if self._backend == "numpy":
if backend_module.backend() == "numpy":
return getattr(backend_module, name)
else:
raise NotImplementedError(
"Currently, we cannot dynamically import the numpy backend "
"because it would disrupt the namespace of the import."
)
if self._backend == "openvino":
module = importlib.import_module("keras.src.backend.openvino")
return getattr(module, name)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import copy
import importlib
import os
import sys
from keras.src import backend as backend_module
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
def in_tf_graph():
if global_state.get_global_attribute("in_tf_graph_scope", False):
return True
if "tensorflow" in sys.modules:
from keras.src.utils.module_utils import tensorflow as tf
return not tf.executing_eagerly()
return False
def convert_tf_tensor(outputs, dtype=None):
if backend_module.backend() != "tensorflow" and not in_tf_graph():
outputs = backend_module.convert_to_tensor(outputs, dtype=dtype)
return outputs
class TFGraphScope:
def __init__(self):
self._original_value = global_state.get_global_attribute(
"in_tf_graph_scope", False
)
def __enter__(self):
global_state.set_global_attribute("in_tf_graph_scope", True)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"in_tf_graph_scope", self._original_value
)
class DynamicBackend:
"""A class that can be used to switch from one backend to another.
Example:
```python
backend = DynamicBackend("tensorflow")
y = backend.square(tf.constant(...))
backend.set_backend("jax")
y = backend.square(jax.numpy.array(...))
```
Args:
backend: Initial backend to use (string).
"""
def __init__(self, backend=None):
self._backend = backend or backend_module.backend()
def set_backend(self, backend):
if backend not in ("tensorflow", "jax", "torch", "numpy"):
raise ValueError(
"Available backends are ('tensorflow', 'jax', 'torch' and "
f"'numpy'). Received: backend={backend}"
)
self._backend = backend
def reset(self):
self._backend = backend_module.backend()
@property
def name(self):
return self._backend
def __getattr__(self, name):
if self._backend == "tensorflow":
module = importlib.import_module("keras.src.backend.tensorflow")
return getattr(module, name)
if self._backend == "jax":
module = importlib.import_module("keras.src.backend.jax")
return getattr(module, name)
if self._backend == "torch":
module = importlib.import_module("keras.src.backend.torch")
return getattr(module, name)
if self._backend == "numpy":
if backend_module.backend() == "numpy":
return getattr(backend_module, name)
else:
raise NotImplementedError(
"Currently, we cannot dynamically import the numpy backend "
"because it would disrupt the namespace of the import."
)
@keras_export("keras.config.set_backend")
def set_backend(backend):
"""Reload the backend (and the Keras package).
Example:
```python
keras.config.set_backend("jax")
```
⚠️ WARNING ⚠️: Using this function is dangerous and should be done
carefully. Changing the backend will **NOT** convert
the type of any already-instantiated objects.
Thus, any layers / tensors / etc. already created will no
longer be usable without errors. It is strongly recommended **not**
to keep around **any** Keras-originated objects instances created
before calling `set_backend()`.
This includes any function or class instance that uses any Keras
functionality. All such code needs to be re-executed after calling
`set_backend()`.
"""
os.environ["KERAS_BACKEND"] = backend
# Clear module cache.
loaded_modules = [
key for key in sys.modules.keys() if key.startswith("keras")
]
for key in loaded_modules:
del sys.modules[key]
# Reimport Keras with the new backend (set via KERAS_BACKEND).
import keras
# Finally: refresh all imported Keras submodules.
globs = copy.copy(globals())
for key, value in globs.items():
if value.__class__ == keras.__class__:
if str(value).startswith("<module 'keras."):
module_name = str(value)
module_name = module_name[module_name.find("'") + 1 :]
module_name = module_name[: module_name.find("'")]
globals()[key] = importlib.import_module(module_name)
|
import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
** IMPORTANT ** If your custom pass's behavior depends on some external state, then
you'll need to implement something more complicated (or disable caching).
EXAMPLE:
class MyCustomGraphPass(CustomGraphPass):
def __call__(self, graph: torch.fx.graph.Graph) -> None:
# my custom graph optimization pass
# ...
def uuid(self) -> Optional[Any]:
return get_hash_for_files((__file__,))
"""
@abstractmethod
def __call__(self, graph: torch.fx.graph.Graph) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
class CustomGraphModulePass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
"""
@abstractmethod
def __call__(self, gm: torch.fx.GraphModule) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
CustomGraphPassType: TypeAlias = Optional[
Union[CustomGraphPass, Callable[[torch.fx.graph.Graph], None]]
]
@lru_cache(1)
def get_hash_for_files(paths: tuple[str], extra: str = "") -> bytes:
"""
Helper to compute a unique string by hashing the contents of a list of files.
"""
hasher = hashlib.sha256()
hasher.update(extra.encode("utf-8"))
for path in paths:
with open(path, "rb") as f:
hasher.update(path.encode("utf-8"))
hasher.update(f.read())
return hasher.digest()
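# Note (illustrative): because the digest covers the raw bytes of every listed file, any
# edit to those files produces a new uuid, which in turn invalidates cached graphs that
# were compiled with the custom pass applied.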
|
import hashlib
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import Any, Callable, Optional, Union
from typing_extensions import TypeAlias
import torch.fx.graph
class CustomGraphPass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
    textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
** IMPORTANT ** If your custom pass's behavior depends on some external state, then
you'll need to implement something more complicated (or disable caching).
EXAMPLE:
class MyCustomGraphPass(CustomGraphPass):
def __call__(self, graph: torch.fx.graph.Graph) -> None:
# my custom graph optimization pass
# ...
def uuid(self) -> Optional[Any]:
return get_hash_for_files((__file__,))
"""
@abstractmethod
def __call__(self, graph: torch.fx.graph.Graph) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
class CustomGraphModulePass(ABC):
"""
Implement this interface for custom Graph passes:
1) The __call__() method contains the implementation of the custom pass.
2) The uuid() method enables inductor to cache compiled graphs when your custom
passes are applied. This method can return any identifier as long as it uniquely
identifies your implementation (and can be pickled). The caching logic includes this
identifier in its key calculation, i.e., any new value will effectively invalidate
existing entries. We expect custom passes would typically depend purely on the
    textual representation of the implementation. In that case, we recommend using the
'get_hash_for_files' helper below to compute a unique hash from the contents of a
static list of source files, i.e., the source(s) containing the custom pass
implementation. That approach ensures that any change to the implementation will
mean a new uuid.
"""
@abstractmethod
def __call__(self, gm: torch.fx.GraphModule) -> None:
"""
Implementation of the custom pass.
"""
@abstractmethod
def uuid(self) -> Optional[Any]:
"""
Return an ID to uniquely identify your custom pass implementation. Return None
to skip inductor code caching entirely.
"""
CustomGraphPassType: TypeAlias = Optional[
Union[CustomGraphPass, Callable[[torch.fx.graph.Graph], None]]
]
@lru_cache(1)
def get_hash_for_files(paths: tuple[str], extra: str = "") -> bytes:
"""
Helper to compute a unique string by hashing the contents of a list of files.
"""
hasher = hashlib.sha256()
hasher.update(extra.encode("utf-8"))
for path in paths:
with open(path, "rb") as f:
hasher.update(path.encode("utf-8"))
hasher.update(f.read())
return hasher.digest()
|
import os
from . import InputExample
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers
"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
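# Usage sketch (folder and file names are illustrative):
#   reader = LabelSentenceReader("data/", label_col_idx=0, sentence_col_idx=1)
#   examples = reader.get_examples("train.tsv")
#   # each InputExample holds texts=[sentence] and an integer label id assigned in order
#   # of first appearance of the label string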
|
from . import InputExample
import os
class LabelSentenceReader:
"""Reads in a file that has at least two columns: a label and a sentence.
This reader can for example be used with the BatchHardTripletLoss.
Maps labels automatically to integers"""
def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator="\t"):
self.folder = folder
self.label_map = {}
self.label_col_idx = label_col_idx
self.sentence_col_idx = sentence_col_idx
self.separator = separator
def get_examples(self, filename, max_examples=0):
examples = []
id = 0
for line in open(os.path.join(self.folder, filename), encoding="utf-8"):
splits = line.strip().split(self.separator)
label = splits[self.label_col_idx]
sentence = splits[self.sentence_col_idx]
if label not in self.label_map:
self.label_map[label] = len(self.label_map)
label_id = self.label_map[label]
guid = "%s-%d" % (filename, id)
id += 1
examples.append(InputExample(guid=guid, texts=[sentence], label=label_id))
if 0 < max_examples <= id:
break
return examples
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import Executor, DocumentArray, requests
from jina_commons.batching import get_docs_batch_generator
from .audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
:param model_path: path of the pre-trained AudioCLIP model.
:param default_traversal_paths: default traversal path (used if not specified in
request's parameters)
:param default_batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
default_batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> None:
"""
        Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
with torch.no_grad():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
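# Usage sketch (illustrative; assumes the pretrained checkpoint is available at the default
# model_path):
#   from jina import Flow, Document, DocumentArray
#   with Flow().add(uses=AudioCLIPTextEncoder) as f:
#       f.post(on='/index', inputs=DocumentArray([Document(text='a dog barking')]))
#   # every Document with a `text` attribute comes back with its `embedding` set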
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Iterable, Optional
import torch
from jina import Executor, DocumentArray, requests
from jina_commons.batching import get_docs_batch_generator
from audio_clip.model import AudioCLIP
class AudioCLIPTextEncoder(Executor):
"""
Encode text data with the AudioCLIP model
:param model_path: path of the pre-trained AudioCLIP model.
:param default_traversal_paths: default traversal path (used if not specified in
request's parameters)
:param default_batch_size: default batch size (used if not specified in
request's parameters)
:param device: device that the model is on (should be "cpu", "cuda" or "cuda:X",
where X is the index of the GPU on the machine)
"""
def __init__(
self,
model_path: str = '.cache/AudioCLIP-Full-Training.pt',
default_traversal_paths: Iterable[str] = ['r'],
default_batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.model = AudioCLIP(pretrained=model_path).to(device).eval()
self.default_traversal_paths = default_traversal_paths
self.default_batch_size = default_batch_size
@requests
def encode(
self, docs: Optional[DocumentArray], parameters: dict, *args, **kwargs
) -> None:
"""
        Method to create embeddings for documents by encoding their text.
:param docs: A document array with documents to create embeddings for. Only the
documents that have the ``text`` attribute will get embeddings.
:param parameters: A dictionary that contains parameters to control encoding.
The accepted keys are ``traversal_paths`` and ``batch_size`` - in their
absence their corresponding default values are used.
"""
batch_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
)
with torch.no_grad():
for batch in batch_generator:
embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])
embeddings = embeddings.cpu().numpy()
for idx, doc in enumerate(batch):
doc.embedding = embeddings[idx]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper',
'MMSeparateDistributedDataParallel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .fully_sharded_distributed import \
MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.parrots_wrapper import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .distributed import MMDistributedDataParallel
from .seperate_distributed import MMSeparateDistributedDataParallel
from .utils import is_model_wrapper
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper',
'MMSeparateDistributedDataParallel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .fully_sharded_distributed import \
MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|