input | output
---|---|
from typing import Any, cast
import torch
from torch import nn
from .base_structured_sparsifier import BaseStructuredSparsifier
from .parametrization import FakeStructuredSparsity
class LSTMSaliencyPruner(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} inside an LSTM, we have two packed weight matrices
- weight_ih_l{k}
- weight_hh_l{k}
These tensors pack the weights for the 4 linear layers together for efficiency.
[W_ii | W_if | W_ig | W_io]
Pruning this tensor directly will lead to weights being misassigned when unpacked.
To ensure that each packed linear layer is pruned the same amount:
1. We split the packed weight into the 4 constituent linear parts
2. Update the mask for each piece individually, based on its saliency
This applies to both weight_ih_l{k} and weight_hh_l{k}.
"""
def update_mask(self, module: nn.Module, tensor_name: str, **kwargs: Any) -> None:
weights = getattr(module, tensor_name)
for p in getattr(module.parametrizations, tensor_name):
if isinstance(p, FakeStructuredSparsity):
mask = cast(torch.Tensor, p.mask)
# select weights based on magnitude
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
# take norm over all but first dim
dims = tuple(range(1, weights.dim()))
saliency = weights.norm(dim=dims, p=1)
# handle weights in 4 groups
split_size = len(mask) // 4
masks = torch.split(mask, split_size)
saliencies = torch.split(saliency, split_size)
for keep_mask, sal in zip(masks, saliencies):
# mask smallest k values to be removed
k = int(len(keep_mask) * kwargs["sparsity_level"])
prune = sal.topk(k, largest=False, sorted=False).indices
keep_mask.data[prune] = False # modifies underlying p.mask directly
|
# mypy: allow-untyped-defs
from typing import cast
import torch
from .base_structured_sparsifier import BaseStructuredSparsifier, FakeStructuredSparsity
class LSTMSaliencyPruner(BaseStructuredSparsifier):
"""
Prune packed LSTM weights based on saliency.
For each layer {k} inside an LSTM, we have two packed weight matrices
- weight_ih_l{k}
- weight_hh_l{k}
These tensors pack the weights for the 4 linear layers together for efficiency.
[W_ii | W_if | W_ig | W_io]
Pruning this tensor directly will lead to weights being misassigned when unpacked.
To ensure that each packed linear layer is pruned the same amount:
1. We split the packed weight into the 4 constituent linear parts
2. Update the mask for each piece individually, based on its saliency
This applies to both weight_ih_l{k} and weight_hh_l{k}.
"""
def update_mask(self, module, tensor_name, **kwargs):
weights = getattr(module, tensor_name)
for p in getattr(module.parametrizations, tensor_name):
if isinstance(p, FakeStructuredSparsity):
mask = cast(torch.Tensor, p.mask)
# select weights based on magnitude
if weights.dim() <= 1:
raise Exception( # noqa: TRY002
"Structured pruning can only be applied to a 2+dim weight tensor!"
)
# take norm over all but first dim
dims = tuple(range(1, weights.dim()))
saliency = weights.norm(dim=dims, p=1)
# handle weights in 4 groups
split_size = len(mask) // 4
masks = torch.split(mask, split_size)
saliencies = torch.split(saliency, split_size)
for keep_mask, sal in zip(masks, saliencies):
# mask smallest k values to be removed
k = int(len(keep_mask) * kwargs["sparsity_level"])
prune = sal.topk(k, largest=False, sorted=False).indices
keep_mask.data[prune] = False # modifies underlying p.mask directly
|
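A minimal sketch of the masking logic above, using plain tensors instead of the parametrization machinery (the shapes and the sparsity level are invented for illustration):
import torch

# Packed LSTM weight: 4 gates stacked along dim 0, e.g. weight_ih_l0 with
# hidden_size=3 and input_size=5 -> shape (4 * 3, 5).
weights = torch.randn(12, 5)
mask = torch.ones(12, dtype=torch.bool)
sparsity_level = 0.5  # invented for the sketch

# L1 saliency per output row (norm over all dims but the first).
saliency = weights.norm(dim=1, p=1)

# Split mask and saliency into the 4 gate groups and prune each group
# independently, so every gate loses the same number of rows.
split_size = len(mask) // 4
for keep_mask, sal in zip(torch.split(mask, split_size), torch.split(saliency, split_size)):
    k = int(len(keep_mask) * sparsity_level)
    prune = sal.topk(k, largest=False).indices
    keep_mask[prune] = False  # the split views share storage with `mask`

print(mask.view(4, -1))  # each gate keeps the same number of True rows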
__version__ = '0.14.10'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.14.9'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
from collections.abc import Iterable
from pathlib import Path
from typing import Any
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[tuple[str, Path]]
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
}
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
try:
del dependencies[name]
except KeyError:
pass
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
|
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple
from tomlkit import dump, inline_table, load
from tomlkit.items import InlineTable
def _get_dep_inline_table(path: Path) -> InlineTable:
dep = inline_table()
dep.update({"path": str(path), "develop": True})
return dep
def add_dependencies_to_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[Tuple[str, Path]]
) -> None:
"""Add dependencies to pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
# tomlkit types aren't amazing - treat as Dict instead
pyproject: Dict[str, Any] = load(f)
pyproject["tool"]["poetry"]["dependencies"].update(
{
name: _get_dep_inline_table(loc.relative_to(pyproject_toml.parent))
for name, loc in local_editable_dependencies
}
)
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
def remove_dependencies_from_pyproject_toml(
pyproject_toml: Path, local_editable_dependencies: Iterable[str]
) -> None:
"""Remove dependencies from pyproject.toml."""
with open(pyproject_toml, encoding="utf-8") as f:
pyproject: Dict[str, Any] = load(f)
# tomlkit types aren't amazing - treat as Dict instead
dependencies = pyproject["tool"]["poetry"]["dependencies"]
for name in local_editable_dependencies:
try:
del dependencies[name]
except KeyError:
pass
with open(pyproject_toml, "w", encoding="utf-8") as f:
dump(pyproject, f)
|
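A self-contained sketch of the inline-table entry these helpers write, built directly with tomlkit (the package name and path are hypothetical):
from tomlkit import dumps, inline_table, parse

doc = parse('[tool.poetry.dependencies]\npython = "^3.9"\n')
dep = inline_table()
dep.update({"path": "libs/my-local-lib", "develop": True})  # hypothetical local checkout
doc["tool"]["poetry"]["dependencies"]["my-local-lib"] = dep
print(dumps(doc))
# [tool.poetry.dependencies]
# python = "^3.9"
# my-local-lib = {path = "libs/my-local-lib", develop = true}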
"""Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
NCCL_WHL = """ \"nvidia-nccl-cu12 ; platform_system == 'Linux' and platform_machine != 'aarch64'\","""
NAME = "{{ name }}"
NCCL = "{{ nccl }}"
def copyfile(src: str, dst: str) -> None:
with open(src, "rb") as fd:
content = fd.read()
with open(dst, "wb") as fd:
fd.write(content)
def make_pyproject(*, use_cpu_suffix: int, require_nccl_dep: int) -> None:
if use_cpu_suffix == 1 and require_nccl_dep == 1:
raise ValueError(
"xgboost-cpu cannot require NCCL dependency. "
"If --use-cpu-suffix=1, you must set --require-nccl-dep=0."
)
with open(IN_PATH) as fd:
pyproject = fd.read()
readme_dft = os.path.join(PY_PACKAGE, "README.dft.rst")
readme_cpu = os.path.join(PY_PACKAGE, "README.cpu.rst")
readme = os.path.join(PY_PACKAGE, "README.rst")
pyproject = pyproject.replace(NAME, "xgboost-cpu" if use_cpu_suffix else "xgboost")
copyfile(readme_cpu if use_cpu_suffix else readme_dft, readme)
pyproject = pyproject.replace(NCCL, NCCL_WHL if require_nccl_dep else "")
pyproject = (
f"# Generated by `{os.path.basename(__file__)}`, don't edit.\n" + pyproject
)
with open(OUT_PATH, "w") as fd:
fd.write(pyproject)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--use-cpu-suffix",
type=int,
choices=[0, 1],
required=True,
help="Whether to rename the package name to xgboost-cpu",
)
parser.add_argument(
"--require-nccl-dep",
type=int,
choices=[0, 1],
required=True,
help="Whether to require the NCCL dependency",
)
args = parser.parse_args()
make_pyproject(
use_cpu_suffix=args.use_cpu_suffix,
require_nccl_dep=args.require_nccl_dep,
)
|
"""Create Package variants for PyPI distribution."""
import argparse
import os
from test_utils import PY_PACKAGE
IN_PATH = os.path.join(PY_PACKAGE, "pyproject.toml.in")
OUT_PATH = os.path.join(PY_PACKAGE, "pyproject.toml")
CHOICES = ["default", "cpu", "manylinux2014"]
NCCL_WHL = """ \"nvidia-nccl-cu12 ; platform_system == 'Linux' and platform_machine != 'aarch64'\","""
NAME = "{{ name }}"
NCCL = "{{ nccl }}"
def copyfile(src: str, dst: str) -> None:
with open(src, "rb") as fd:
content = fd.read()
with open(dst, "wb") as fd:
fd.write(content)
def make_pyproject(variant: str) -> None:
assert variant in CHOICES
with open(IN_PATH) as fd:
pyproject = fd.read()
readme_dft = os.path.join(PY_PACKAGE, "README.dft.rst")
readme_cpu = os.path.join(PY_PACKAGE, "README.cpu.rst")
readme = os.path.join(PY_PACKAGE, "README.rst")
if variant == "cpu":
pyproject = pyproject.replace(NAME, "xgboost-cpu").replace(NCCL, "")
copyfile(readme_cpu, readme)
elif variant == "manylinux2014":
pyproject = pyproject.replace(NAME, "xgboost").replace(NCCL, "")
copyfile(readme_dft, readme)
else:
pyproject = pyproject.replace(NAME, "xgboost").replace(NCCL, NCCL_WHL)
copyfile(readme_dft, readme)
pyproject = (
f"# Generated by `{os.path.basename(__file__)}`, don't edit.\n" + pyproject
)
with open(OUT_PATH, "w") as fd:
fd.write(pyproject)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--variant",
type=str,
choices=CHOICES,
default="default",
)
args = parser.parse_args()
make_pyproject(args.variant)
|
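Both versions reduce to plain placeholder substitution on the `pyproject.toml.in` template; a sketch with an invented template:
template = """name = "{{ name }}"
dependencies = [
{{ nccl }}
]
"""
NCCL_DEP = """    "nvidia-nccl-cu12 ; platform_system == 'Linux'","""  # shortened for the sketch

# The default variant keeps the NCCL dependency; the cpu variant renames and drops it.
print(template.replace("{{ name }}", "xgboost").replace("{{ nccl }}", NCCL_DEP))
print(template.replace("{{ name }}", "xgboost-cpu").replace("{{ nccl }}", ""))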
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:""" # noqa: E501
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
system_template = """Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}""" # noqa: E501
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
|
# flake8: noqa
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:"""
PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
system_template = """Use the following pieces of context to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
{context}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
CHAT_PROMPT = ChatPromptTemplate.from_messages(messages)
PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=PROMPT, conditionals=[(is_chat_model, CHAT_PROMPT)]
)
|
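How the selector resolves at runtime, checked with the fake test models from langchain_community (assuming those helpers are available in the installed version):
from langchain_community.chat_models.fake import FakeListChatModel
from langchain_community.llms.fake import FakeListLLM

# `is_chat_model` is an isinstance check against BaseChatModel, so a chat
# model gets CHAT_PROMPT and a plain completion LLM gets the default PROMPT.
assert PROMPT_SELECTOR.get_prompt(FakeListChatModel(responses=["Paris."])) is CHAT_PROMPT
assert PROMPT_SELECTOR.get_prompt(FakeListLLM(responses=["Paris."])) is PROMPT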
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [
ExecutionStatus.COMPLETED,
ExecutionStatus.TERMINATED,
ExecutionStatus.FAILED,
]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
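The heart of `run` is the event-filtering loop; a standalone sketch with stand-in event objects (all names here are illustrative, not the real backend API):
from dataclasses import dataclass, field

TERMINAL = {"COMPLETED", "TERMINATED", "FAILED"}  # stand-in for ExecutionStatus

@dataclass
class Event:  # stand-in for the real execution event model
    status: str
    node_id: str = ""
    block_id: str = ""
    input_data: dict = field(default_factory=dict)
    output_data: dict = field(default_factory=dict)

def collect_outputs(events, output_block_ids):
    """Yield (name, value) from OUTPUT-block events; stop on a terminal graph event."""
    for event in events:
        if not event.node_id:  # graph-level event: check for termination
            if event.status in TERMINAL:
                break
            continue
        if event.block_id not in output_block_ids:
            continue
        name = event.input_data.get("name")
        if not name:
            continue
        for value in event.output_data.get("output", []):
            yield name, value

events = [
    Event(status="RUNNING", node_id="n1", block_id="out-1",
          input_data={"name": "result"}, output_data={"output": [42]}),
    Event(status="COMPLETED"),
]
print(list(collect_outputs(events, {"out-1"})))  # [('result', 42)]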
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.layers.SpectralNormalization")
class SpectralNormalization(Wrapper):
"""Performs spectral normalization on the weights of a target layer.
This wrapper controls the Lipschitz constant of the weights of a layer by
constraining their spectral norm, which can stabilize the training of GANs.
Args:
layer: A `keras.layers.Layer` instance that
has either a `kernel` (e.g. `Conv2D`, `Dense`...)
or an `embeddings` attribute (`Embedding` layer).
power_iterations: int, the number of iterations during normalization.
**kwargs: Base wrapper keyword arguments.
Examples:
Wrap `keras.layers.Conv2D`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2))
>>> y = conv2d(x)
>>> y.shape
(1, 9, 9, 2)
Wrap `keras.layers.Dense`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> dense = SpectralNormalization(keras.layers.Dense(10))
>>> y = dense(x)
>>> y.shape
(1, 10, 10, 10)
Reference:
- [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
"""
def __init__(self, layer, power_iterations=1, **kwargs):
super().__init__(layer, **kwargs)
if power_iterations <= 0:
raise ValueError(
"`power_iterations` should be greater than zero. Received: "
f"`power_iterations={power_iterations}`"
)
self.power_iterations = power_iterations
def build(self, input_shape):
super().build(input_shape)
self.input_spec = InputSpec(shape=[None] + list(input_shape[1:]))
if hasattr(self.layer, "kernel"):
self.kernel = self.layer.kernel
elif hasattr(self.layer, "embeddings"):
self.kernel = self.layer.embeddings
else:
raise ValueError(
f"{type(self.layer).__name__} object has no attribute 'kernel' "
"nor 'embeddings'"
)
self.kernel_shape = self.kernel.shape
self.vector_u = self.add_weight(
shape=(1, self.kernel_shape[-1]),
initializer=initializers.TruncatedNormal(stddev=0.02),
trainable=False,
name="vector_u",
dtype=self.kernel.dtype,
)
def call(self, inputs, training=False):
if training:
new_vector_u, new_kernel = ops.cond(
ops.all(ops.equal(self.kernel.value, 0)),
lambda: (self.vector_u.value, self.kernel.value),
self.normalized_weights,
)
self.vector_u.assign(new_vector_u)
self.kernel.assign(new_kernel)
output = self.layer(inputs)
return ops.cast(output, inputs.dtype)
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
def normalized_weights(self):
"""Generate spectral normalized weights.
This method returns the updated value for `self.kernel` with the
spectral normalized value, so that the layer is ready for `call()`.
"""
weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]])
vector_u = self.vector_u.value
for _ in range(self.power_iterations):
vector_v = normalize(
ops.matmul(vector_u, ops.transpose(weights)), axis=None
)
vector_u = normalize(ops.matmul(vector_v, weights), axis=None)
vector_u = ops.stop_gradient(vector_u)
vector_v = ops.stop_gradient(vector_v)
sigma = ops.matmul(
ops.matmul(vector_v, weights), ops.transpose(vector_u)
)
kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape)
return ops.cast(vector_u, self.vector_u.dtype), ops.cast(
kernel, self.kernel.dtype
)
def get_config(self):
config = {"power_iterations": self.power_iterations}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.layers.SpectralNormalization")
class SpectralNormalization(Wrapper):
"""Performs spectral normalization on the weights of a target layer.
This wrapper controls the Lipschitz constant of the weights of a layer by
constraining their spectral norm, which can stabilize the training of GANs.
Args:
layer: A `keras.layers.Layer` instance that
has either a `kernel` (e.g. `Conv2D`, `Dense`...)
or an `embeddings` attribute (`Embedding` layer).
power_iterations: int, the number of iterations during normalization.
**kwargs: Base wrapper keyword arguments.
Examples:
Wrap `keras.layers.Conv2D`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2))
>>> y = conv2d(x)
>>> y.shape
(1, 9, 9, 2)
Wrap `keras.layers.Dense`:
>>> x = np.random.rand(1, 10, 10, 1)
>>> dense = SpectralNormalization(keras.layers.Dense(10))
>>> y = dense(x)
>>> y.shape
(1, 10, 10, 10)
Reference:
- [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
"""
def __init__(self, layer, power_iterations=1, **kwargs):
super().__init__(layer, **kwargs)
if power_iterations <= 0:
raise ValueError(
"`power_iterations` should be greater than zero. Received: "
f"`power_iterations={power_iterations}`"
)
self.power_iterations = power_iterations
def build(self, input_shape):
super().build(input_shape)
self.input_spec = InputSpec(shape=[None] + list(input_shape[1:]))
if hasattr(self.layer, "kernel"):
self.kernel = self.layer.kernel
elif hasattr(self.layer, "embeddings"):
self.kernel = self.layer.embeddings
else:
raise ValueError(
f"{type(self.layer).__name__} object has no attribute 'kernel' "
"nor 'embeddings'"
)
self.kernel_shape = self.kernel.shape
self.vector_u = self.add_weight(
shape=(1, self.kernel_shape[-1]),
initializer=initializers.TruncatedNormal(stddev=0.02),
trainable=False,
name="vector_u",
dtype=self.kernel.dtype,
)
def call(self, inputs, training=False):
if training:
new_vector_u, new_kernel = ops.cond(
ops.all(ops.equal(self.kernel.value, 0)),
lambda: (self.vector_u.value, self.kernel.value),
self.normalized_weights,
)
self.vector_u.assign(new_vector_u)
self.kernel.assign(new_kernel)
output = self.layer(inputs)
return ops.cast(output, inputs.dtype)
def compute_output_shape(self, input_shape):
return self.layer.compute_output_shape(input_shape)
def normalized_weights(self):
"""Generate spectral normalized weights.
This method returns the updated value for `self.kernel` with the
spectral normalized value, so that the layer is ready for `call()`.
"""
weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]])
vector_u = self.vector_u.value
for _ in range(self.power_iterations):
vector_v = normalize(
ops.matmul(vector_u, ops.transpose(weights)), axis=None
)
vector_u = normalize(ops.matmul(vector_v, weights), axis=None)
# vector_u = tf.stop_gradient(vector_u)
# vector_v = tf.stop_gradient(vector_v)
sigma = ops.matmul(
ops.matmul(vector_v, weights), ops.transpose(vector_u)
)
kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape)
return ops.cast(vector_u, self.vector_u.dtype), ops.cast(
kernel, self.kernel.dtype
)
def get_config(self):
config = {"power_iterations": self.power_iterations}
base_config = super().get_config()
return {**base_config, **config}
|
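`normalized_weights` is a standard power iteration for the largest singular value; the same computation in plain NumPy (a sketch, not the Keras implementation):
import numpy as np

rng = np.random.default_rng(0)
kernel = rng.normal(size=(3, 3, 8, 16))   # e.g. a Conv2D kernel
w = kernel.reshape(-1, kernel.shape[-1])  # (72, 16), like ops.reshape above
u = rng.normal(size=(1, w.shape[-1]))     # running estimate, like vector_u

for _ in range(1):  # power_iterations
    v = u @ w.T
    v /= np.linalg.norm(v)
    u = v @ w
    u /= np.linalg.norm(u)

sigma = (v @ w @ u.T).item()  # approximates the largest singular value
w_sn = kernel / sigma         # spectrally normalized kernel
print(sigma, np.linalg.svd(w, compute_uv=False)[0])  # converges with more iterations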
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .linear import LinearOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
TwitterOAuthHandler,
LinearOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
from typing import TYPE_CHECKING
from .github import GitHubOAuthHandler
from .google import GoogleOAuthHandler
from .notion import NotionOAuthHandler
from .twitter import TwitterOAuthHandler
if TYPE_CHECKING:
from ..providers import ProviderName
from .base import BaseOAuthHandler
# --8<-- [start:HANDLERS_BY_NAMEExample]
HANDLERS_BY_NAME: dict["ProviderName", type["BaseOAuthHandler"]] = {
handler.PROVIDER_NAME: handler
for handler in [
GitHubOAuthHandler,
GoogleOAuthHandler,
NotionOAuthHandler,
TwitterOAuthHandler,
]
}
# --8<-- [end:HANDLERS_BY_NAMEExample]
__all__ = ["HANDLERS_BY_NAME"]
|
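The pattern above is a class registry keyed on a class attribute; a toy, self-contained version (names illustrative):
class BaseHandler:
    PROVIDER_NAME = ""

class GitHubHandler(BaseHandler):
    PROVIDER_NAME = "github"

class NotionHandler(BaseHandler):
    PROVIDER_NAME = "notion"

# Build the lookup table once; resolve handlers by provider name at runtime.
HANDLERS = {h.PROVIDER_NAME: h for h in [GitHubHandler, NotionHandler]}
print(HANDLERS["github"].__name__)  # GitHubHandler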
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.queue import RedisExecutionEventBus
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return Config().database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
get_or_refill_credit = cast(
Callable[[Any, str], int],
exposed_run_and_wait(user_credit_model.get_or_refill_credit),
)
spend_credits = cast(
Callable[[Any, str, int, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
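`exposed_run_and_wait` turns module-level coroutine functions into blocking instance methods; a self-contained sketch of the same wrap-and-wait pattern with plain asyncio (service plumbing omitted, all names invented):
import asyncio
from functools import wraps

def run_and_wait_method(f):
    """Wrap a free-standing coroutine function as a blocking instance method."""
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        return self.loop.run_until_complete(f(*args, **kwargs))
    return wrapper

async def fetch_user(user_id: str) -> dict:
    await asyncio.sleep(0)  # stands in for a real async DB call
    return {"id": user_id}

class Service:
    def __init__(self):
        self.loop = asyncio.new_event_loop()
    fetch_user = run_and_wait_method(fetch_user)

print(Service().fetch_user("u1"))  # {'id': 'u1'}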
"""Usage utilities."""
from typing import Callable
def _dict_int_op(
left: dict,
right: dict,
op: Callable[[int, int], int],
*,
default: int = 0,
depth: int = 0,
max_depth: int = 100,
) -> dict:
if depth >= max_depth:
msg = f"{max_depth=} exceeded, unable to combine dicts."
raise ValueError(msg)
combined: dict = {}
for k in set(left).union(right):
if isinstance(left.get(k, default), int) and isinstance(
right.get(k, default), int
):
combined[k] = op(left.get(k, default), right.get(k, default))
elif isinstance(left.get(k, {}), dict) and isinstance(right.get(k, {}), dict):
combined[k] = _dict_int_op(
left.get(k, {}),
right.get(k, {}),
op,
default=default,
depth=depth + 1,
max_depth=max_depth,
)
else:
types = [type(d[k]) for d in (left, right) if k in d]
msg = (
f"Unknown value types: {types}. Only dict and int values are supported."
)
raise ValueError(msg) # noqa: TRY004
return combined
|
from typing import Callable
def _dict_int_op(
left: dict,
right: dict,
op: Callable[[int, int], int],
*,
default: int = 0,
depth: int = 0,
max_depth: int = 100,
) -> dict:
if depth >= max_depth:
msg = f"{max_depth=} exceeded, unable to combine dicts."
raise ValueError(msg)
combined: dict = {}
for k in set(left).union(right):
if isinstance(left.get(k, default), int) and isinstance(
right.get(k, default), int
):
combined[k] = op(left.get(k, default), right.get(k, default))
elif isinstance(left.get(k, {}), dict) and isinstance(right.get(k, {}), dict):
combined[k] = _dict_int_op(
left.get(k, {}),
right.get(k, {}),
op,
default=default,
depth=depth + 1,
max_depth=max_depth,
)
else:
types = [type(d[k]) for d in (left, right) if k in d]
msg = (
f"Unknown value types: {types}. Only dict and int values are supported."
)
raise ValueError(msg) # noqa: TRY004
return combined
|
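A usage example for `_dict_int_op`: merging two nested token-usage dicts with addition (the counts are invented; key order of the printed dict may vary):
import operator

left = {"input_tokens": 10, "output_tokens": 20, "details": {"cached": 3}}
right = {"input_tokens": 5, "details": {"cached": 1, "audio": 2}}

combined = _dict_int_op(left, right, operator.add)
# -> input_tokens: 15, output_tokens: 20, details: {cached: 4, audio: 2}
print(combined)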
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class DreamBoothLoRASDXLWithEDM(ExamplesTestsAccelerate):
def test_dreambooth_lora_sdxl_with_edm(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/dreambooth/train_dreambooth_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
--do_edm_style_training
--instance_data_dir docs/source/en/imgs
--instance_prompt photo
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
# when not training the text encoder, all the parameters in the state dict should start
# with `"unet"` in their names.
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
self.assertTrue(starts_with_unet)
def test_dreambooth_lora_playground(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/dreambooth/train_dreambooth_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-playground-v2-5-pipe
--instance_data_dir docs/source/en/imgs
--instance_prompt photo
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
# when not training the text encoder, all the parameters in the state dict should start
# with `"unet"` in their names.
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
self.assertTrue(starts_with_unet)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class DreamBoothLoRASDXLWithEDM(ExamplesTestsAccelerate):
def test_dreambooth_lora_sdxl_with_edm(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/dreambooth/train_dreambooth_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe
--do_edm_style_training
--instance_data_dir docs/source/en/imgs
--instance_prompt photo
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
# when not training the text encoder, all the parameters in the state dict should start
# with `"unet"` in their names.
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
self.assertTrue(starts_with_unet)
def test_dreambooth_lora_playground(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/dreambooth/train_dreambooth_lora_sdxl.py
--pretrained_model_name_or_path hf-internal-testing/tiny-playground-v2-5-pipe
--instance_data_dir docs/source/en/imgs
--instance_prompt photo
--resolution 64
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
# when not training the text encoder, all the parameters in the state dict should start
# with `"unet"` in their names.
starts_with_unet = all(key.startswith("unet") for key in lora_state_dict.keys())
self.assertTrue(starts_with_unet)
|
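The two state-dict assertions in isolation, with a plain dict standing in for the loaded safetensors file (keys invented to match the expected layout):
lora_state_dict = {
    "unet.down_blocks.0.attentions.0.lora_A.weight": None,
    "unet.up_blocks.1.attentions.2.lora_B.weight": None,
}
# every key is a LoRA weight...
assert all("lora" in k for k in lora_state_dict)
# ...and, with the text encoder untrained, every key belongs to the UNet
assert all(k.startswith("unet") for k in lora_state_dict)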
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
MultiModalLLMCompletionProgram,
)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
async def _screenshot_page(
url: str, out_path: str, width: int = 1200, height: int = 800
) -> None:
from pyppeteer import launch
browser = await launch()
page = await browser.newPage()
await page.setViewport({"width": width, "height": height})
await page.goto(url, {"waitUntil": "domcontentloaded"})
await page.screenshot({"path": out_path})
await browser.close()
class Product(BaseModel):
"""Data model for an Amazon Product."""
title: str = Field(..., description="Title of product")
category: str = Field(..., description="Category of product")
discount: float = Field(..., description="Discount of product")
price: float = Field(..., description="Price of product")
rating: float = Field(..., description="Rating of product")
description: str = Field(..., description="Description of product")
img_description: str = Field(..., description="Description of product image")
inventory: str = Field(..., description="Inventory of product")
DEFAULT_PROMPT_TEMPLATE_STR = """\
Can you extract the following fields from this product, in JSON format?
"""
class AmazonProductExtractionPack(BaseLlamaPack):
"""
Product extraction pack.
Given a website url of a product (e.g. Amazon page), screenshot it,
and use GPT-4V to extract structured outputs.
"""
def __init__(
self,
website_url: str,
tmp_file_path: str = "./tmp.png",
screenshot_width: int = 1200,
screenshot_height: int = 800,
prompt_template_str: str = DEFAULT_PROMPT_TEMPLATE_STR,
**kwargs: Any,
) -> None:
"""Init params."""
self.website_url = website_url
# screenshot the page to a temporary file
asyncio.get_event_loop().run_until_complete(
_screenshot_page(
website_url,
tmp_file_path,
width=screenshot_width,
height=screenshot_height,
)
)
# put your local directory here
self.image_documents = SimpleDirectoryReader(
input_files=[tmp_file_path]
).load_data()
# initialize openai pydantic program
self.openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
self.openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=self.image_documents,
prompt_template_str=prompt_template_str,
llm=self.openai_mm_llm,
verbose=True,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"openai_program": self.openai_program,
"openai_mm_llm": self.openai_mm_llm,
"image_documents": self.image_documents,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.openai_program(*args, **kwargs)
|
"""Product extraction pack."""
import asyncio
from typing import Any, Dict
from llama_index.core import SimpleDirectoryReader
from llama_index.core.llama_pack.base import BaseLlamaPack
from llama_index.core.output_parsers import PydanticOutputParser
from llama_index.core.program.multi_modal_llm_program import (
MultiModalLLMCompletionProgram,
)
from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from pydantic import BaseModel, Field
async def _screenshot_page(
url: str, out_path: str, width: int = 1200, height: int = 800
) -> None:
from pyppeteer import launch
browser = await launch()
page = await browser.newPage()
await page.setViewport({"width": width, "height": height})
await page.goto(url, {"waitUntil": "domcontentloaded"})
await page.screenshot({"path": out_path})
await browser.close()
class Product(BaseModel):
"""Data model for an Amazon Product."""
title: str = Field(..., description="Title of product")
category: str = Field(..., description="Category of product")
discount: float = Field(..., description="Discount of product")
price: float = Field(..., description="Price of product")
rating: float = Field(..., description="Rating of product")
description: str = Field(..., description="Description of product")
img_description: str = Field(..., description="Description of product image")
inventory: str = Field(..., description="Inventory of product")
DEFAULT_PROMPT_TEMPLATE_STR = """\
Can you extract the following fields from this product, in JSON format?
"""
class AmazonProductExtractionPack(BaseLlamaPack):
"""
Product extraction pack.
Given a website url of a product (e.g. Amazon page), screenshot it,
and use GPT-4V to extract structured outputs.
"""
def __init__(
self,
website_url: str,
tmp_file_path: str = "./tmp.png",
screenshot_width: int = 1200,
screenshot_height: int = 800,
prompt_template_str: str = DEFAULT_PROMPT_TEMPLATE_STR,
**kwargs: Any,
) -> None:
"""Init params."""
self.website_url = website_url
# screenshot the page to a temporary file
asyncio.get_event_loop().run_until_complete(
_screenshot_page(
website_url,
tmp_file_path,
width=screenshot_width,
height=screenshot_height,
)
)
# put your local directory here
self.image_documents = SimpleDirectoryReader(
input_files=[tmp_file_path]
).load_data()
# initialize openai pydantic program
self.openai_mm_llm = OpenAIMultiModal(
model="gpt-4-vision-preview", max_new_tokens=1000
)
self.openai_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=PydanticOutputParser(Product),
image_documents=self.image_documents,
prompt_template_str=prompt_template_str,
llm=self.openai_mm_llm,
verbose=True,
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"openai_program": self.openai_program,
"openai_mm_llm": self.openai_mm_llm,
"image_documents": self.image_documents,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.openai_program(*args, **kwargs)
|
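Typical usage of the pack, assuming pyppeteer (with a Chromium download) and an OpenAI API key are configured; the URL is hypothetical:
pack = AmazonProductExtractionPack(
    website_url="https://www.amazon.com/dp/EXAMPLE",  # hypothetical product page
)
product = pack.run()  # a `Product` pydantic object parsed from the screenshot
print(product.title, product.price)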
import json
from typing import AsyncGenerator, Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.llms.bedrock import (
ALTERNATION_ERROR,
Bedrock,
_human_assistant_format,
)
TEST_CASES = {
"""Hey""": """
Human: Hey
Assistant:""",
"""
Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""
Human: Human: Hello
Assistant:""": (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
),
"""Human: Hello
Assistant: Hello
Human: Hello
Assistant:""": """
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""": """
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant: Hello
Human: Hello
Assistant: Hello
Assistant: Hello""": ALTERNATION_ERROR,
"""
Human: Hi.
Assistant: Hi.
Human: Hi.
Human: Hi.
Assistant:""": ALTERNATION_ERROR,
"""
Human: Hello""": """
Human: Hello
Assistant:""",
"""
Human: Hello
Hello
Assistant""": """
Human: Hello
Hello
Assistant
Assistant:""",
"""Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""Hello
Human: Hello
""": """Hello
Human: Hello
Assistant:""",
"""
Human: Assistant: Hello""": """
Human:
Assistant: Hello""",
"""
Human: Human
Assistant: Assistant
Human: Assistant
Assistant: Human""": """
Human: Human
Assistant: Assistant
Human: Assistant
Assistant: Human""",
"""
Assistant: Hello there, your name is:
Human.
Human: Hello there, your name is:
Assistant.""": """
Human:
Assistant: Hello there, your name is:
Human.
Human: Hello there, your name is:
Assistant.
Assistant:""",
"""
Human: Human: Hi
Assistant: Hi""": ALTERNATION_ERROR,
"""Human: Hi
Human: Hi""": ALTERNATION_ERROR,
"""
Assistant: Hi
Human: Hi""": """
Human:
Assistant: Hi
Human: Hi
Assistant:""",
"""
Human: Hi
Assistant: Yo
Human: Hey
Assistant: Sup
Human: Hi
Assistant: Hi
Human: Hi
Assistant:""": """
Human: Hi
Assistant: Yo
Human: Hey
Assistant: Sup
Human: Hi
Assistant: Hi
Human: Hi
Assistant:""",
"""
Hello.
Human: Hello.
Assistant:""": """
Hello.
Human: Hello.
Assistant:""",
}
def test__human_assistant_format() -> None:
for input_text, expected_output in TEST_CASES.items():
if expected_output == ALTERNATION_ERROR:
with pytest.warns(UserWarning, match=ALTERNATION_ERROR):
_human_assistant_format(input_text)
else:
output = _human_assistant_format(input_text)
assert output == expected_output
# Sample mock streaming response data
MOCK_STREAMING_RESPONSE = [
{"chunk": {"bytes": b'{"text": "nice"}'}},
{"chunk": {"bytes": b'{"text": " to meet"}'}},
{"chunk": {"bytes": b'{"text": " you"}'}},
]
async def async_gen_mock_streaming_response() -> AsyncGenerator[Dict, None]:
for item in MOCK_STREAMING_RESPONSE:
yield item
async def test_bedrock_async_streaming_call() -> None:
# Mock boto3 import
mock_boto3 = MagicMock()
session = MagicMock()
session.region_name = "region"
mock_boto3.Session.return_value = session
mock_boto3.Session.return_value.client.return_value = (
session # Mocking the client method of the Session object
)
with patch.dict(
"sys.modules", {"boto3": mock_boto3}
): # Mocking boto3 at the top level using patch.dict
# Mock the `Bedrock` class's method that invokes the model
mock_invoke_method = MagicMock(return_value=async_gen_mock_streaming_response())
with patch.object(
Bedrock, "_aprepare_input_and_invoke_stream", mock_invoke_method
):
# Instantiate the Bedrock LLM
llm = Bedrock(
client=None,
model_id="anthropic.claude-v2",
streaming=True,
)
# Call the _astream method
chunks = [
json.loads(chunk["chunk"]["bytes"])["text"] # type: ignore[index]
async for chunk in llm._astream("Hey, how are you?")
]
# Assertions
assert len(chunks) == 3
assert chunks[0] == "nice"
assert chunks[1] == " to meet"
assert chunks[2] == " you"
|
import json
from typing import AsyncGenerator, Dict
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.llms.bedrock import (
ALTERNATION_ERROR,
Bedrock,
_human_assistant_format,
)
TEST_CASES = {
"""Hey""": """
Human: Hey
Assistant:""",
"""
Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""
Human: Human: Hello
Assistant:""": (
"Error: Prompt must alternate between '\n\nHuman:' and '\n\nAssistant:'."
),
"""Human: Hello
Assistant: Hello
Human: Hello
Assistant:""": """
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""": """
Human: Hello
Assistant: Hello
Human: Hello
Assistant:""",
"""
Human: Hello
Assistant: Hello
Human: Hello
Assistant: Hello
Assistant: Hello""": ALTERNATION_ERROR,
"""
Human: Hi.
Assistant: Hi.
Human: Hi.
Human: Hi.
Assistant:""": ALTERNATION_ERROR,
"""
Human: Hello""": """
Human: Hello
Assistant:""",
"""
Human: Hello
Hello
Assistant""": """
Human: Hello
Hello
Assistant
Assistant:""",
"""Hello
Assistant:""": """
Human: Hello
Assistant:""",
"""Hello
Human: Hello
""": """Hello
Human: Hello
Assistant:""",
"""
Human: Assistant: Hello""": """
Human:
Assistant: Hello""",
"""
Human: Human
Assistant: Assistant
Human: Assistant
Assistant: Human""": """
Human: Human
Assistant: Assistant
Human: Assistant
Assistant: Human""",
"""
Assistant: Hello there, your name is:
Human.
Human: Hello there, your name is:
Assistant.""": """
Human:
Assistant: Hello there, your name is:
Human.
Human: Hello there, your name is:
Assistant.
Assistant:""",
"""
Human: Human: Hi
Assistant: Hi""": ALTERNATION_ERROR,
"""Human: Hi
Human: Hi""": ALTERNATION_ERROR,
"""
Assistant: Hi
Human: Hi""": """
Human:
Assistant: Hi
Human: Hi
Assistant:""",
"""
Human: Hi
Assistant: Yo
Human: Hey
Assistant: Sup
Human: Hi
Assistant: Hi
Human: Hi
Assistant:""": """
Human: Hi
Assistant: Yo
Human: Hey
Assistant: Sup
Human: Hi
Assistant: Hi
Human: Hi
Assistant:""",
"""
Hello.
Human: Hello.
Assistant:""": """
Hello.
Human: Hello.
Assistant:""",
}
def test__human_assistant_format() -> None:
for input_text, expected_output in TEST_CASES.items():
if expected_output == ALTERNATION_ERROR:
with pytest.warns(UserWarning, match=ALTERNATION_ERROR):
_human_assistant_format(input_text)
else:
output = _human_assistant_format(input_text)
assert output == expected_output
# Sample mock streaming response data
MOCK_STREAMING_RESPONSE = [
{"chunk": {"bytes": b'{"text": "nice"}'}},
{"chunk": {"bytes": b'{"text": " to meet"}'}},
{"chunk": {"bytes": b'{"text": " you"}'}},
]
async def async_gen_mock_streaming_response() -> AsyncGenerator[Dict, None]:
for item in MOCK_STREAMING_RESPONSE:
yield item
async def test_bedrock_async_streaming_call() -> None:
# Mock boto3 import
mock_boto3 = MagicMock()
session = MagicMock()
session.region_name = "region"
mock_boto3.Session.return_value = session
mock_boto3.Session.return_value.client.return_value = (
session # Mocking the client method of the Session object
)
with patch.dict(
"sys.modules", {"boto3": mock_boto3}
): # Mocking boto3 at the top level using patch.dict
# Mock the `Bedrock` class's method that invokes the model
mock_invoke_method = MagicMock(return_value=async_gen_mock_streaming_response())
with patch.object(
Bedrock, "_aprepare_input_and_invoke_stream", mock_invoke_method
):
# Instantiate the Bedrock LLM
llm = Bedrock(
client=None,
model_id="anthropic.claude-v2",
streaming=True,
)
# Call the _astream method
chunks = [
json.loads(chunk["chunk"]["bytes"])["text"] # type: ignore
async for chunk in llm._astream("Hey, how are you?")
]
# Assertions
assert len(chunks) == 3
assert chunks[0] == "nice"
assert chunks[1] == " to meet"
assert chunks[2] == " you"
|
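The key trick in the streaming test is that a plain MagicMock can return an async generator; a self-contained sketch of the pattern:
import asyncio
from unittest.mock import MagicMock

async def fake_stream():
    for text in ("nice", " to meet", " you"):
        yield {"text": text}

class Client:
    async def consume(self):
        return [chunk["text"] async for chunk in self.stream()]

client = Client()
# Calling the sync mock *returns* the async generator, which `async for` consumes.
client.stream = MagicMock(return_value=fake_stream())
print(asyncio.run(client.consume()))  # ['nice', ' to meet', ' you']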
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
"""Computes elementwise minimum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Minimum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.minimum([x1, x2])`
>>> y = keras.layers.Minimum()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
return self._apply_merge_op_and_or_mask(ops.minimum, inputs)
@keras_export("keras.layers.minimum")
def minimum(inputs, **kwargs):
"""Functional interface to the `keras.layers.Minimum` layer.
Args:
inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor with the elementwise minimum of the inputs, with the same
shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.minimum([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.minimum([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Minimum(**kwargs)(inputs)
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.merging.base_merge import Merge
@keras_export("keras.layers.Minimum")
class Minimum(Merge):
"""Computes elementwise minimum on a list of inputs.
It takes as input a list of tensors, all of the same shape,
and returns a single tensor (also of the same shape).
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Minimum()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `y = keras.layers.minimum([x1, x2])`
>>> y = keras.layers.Minimum()([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def _merge_function(self, inputs):
output = inputs[0]
for i in range(1, len(inputs)):
output = ops.minimum(output, inputs[i])
return output
@keras_export("keras.layers.minimum")
def minimum(inputs, **kwargs):
"""Functional interface to the `keras.layers.Minimum` layer.
Args:
        inputs: A list of input tensors, all of the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
        A tensor containing the elementwise minimum of the inputs, with the
        same shape as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.minimum([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> y = keras.layers.minimum([x1, x2])
>>> out = keras.layers.Dense(4)(y)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Minimum(**kwargs)(inputs)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
    # Apply some OS-specific patches.
    # Temporary fix for Python 3.8 on macOS, where the default start method is set to "spawn":
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
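# Example (assumed usage, not documented here): the start method can be forced
# via the environment variable before Jina is imported, e.g.
#   JINA_MP_START_METHOD=spawn python app.py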
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.22.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the OS X
    El Capitan default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
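# Example: raise the soft limit beyond the default (the value is illustrative):
#   soft, hard = _set_nofile(8192)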
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# attempt to fix the fork error on macOS; it seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
    # Apply some OS-specific patches.
    # Temporary fix for Python 3.8 on macOS, where the default start method is set to "spawn":
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.21.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
    with parallel plot generators, where the Ubuntu default of ``ulimit -n 1024`` or the OS X
    El Capitan default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST-CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some pipeline steps that are not needed for visualization')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='directory to save the visualizations when there is no display interface')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
        help='the display interval between images (in seconds)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
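# Usage sketch (the script name and config path are hypothetical; the flags are
# the ones defined in parse_args above):
#   python browse_dataset.py configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       --output-dir work_dirs/browse --not-show --show-interval 1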
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections.abc import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some pipeline steps that are not needed for visualization')
parser.add_argument(
'--output-dir',
default=None,
type=str,
        help='directory to save the visualizations when there is no display interface')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
        help='the display interval between images (in seconds)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
_base_ = './yolact_r50_1xb8-55e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './yolact_r50_1x8_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from llama_index.core.schema import (
BaseNode,
NodeRelationship,
RelatedNodeType,
TextNode,
)
from llama_index.core.utils import get_tqdm_iterable
class DoclingNodeParser(NodeParser):
"""
Docling format node parser.
Splits the JSON format of `DoclingReader` into nodes corresponding
to respective document elements from Docling's data model
(paragraphs, headings, tables etc.).
Args:
chunker (BaseChunker, optional): The chunker to use. Defaults to `HierarchicalChunker()`.
        id_func (NodeIDGenCallable, optional): The node ID generation function to use. Defaults to `_uuid4_node_id_gen`.
"""
@runtime_checkable
class NodeIDGenCallable(Protocol):
def __call__(self, i: int, node: BaseNode) -> str:
...
@staticmethod
def _uuid4_node_id_gen(i: int, node: BaseNode) -> str:
return str(uuid.uuid4())
chunker: BaseChunker = HierarchicalChunker()
id_func: NodeIDGenCallable = _uuid4_node_id_gen
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> list[BaseNode]:
nodes_with_progress: Iterable[BaseNode] = get_tqdm_iterable(
items=nodes, show_progress=show_progress, desc="Parsing nodes"
)
all_nodes: list[BaseNode] = []
for input_node in nodes_with_progress:
li_doc = LIDocument.model_validate(input_node)
dl_doc: DLDocument = DLDocument.model_validate_json(li_doc.get_content())
chunk_iter = self.chunker.chunk(dl_doc=dl_doc)
for i, chunk in enumerate(chunk_iter):
rels: dict[NodeRelationship, RelatedNodeType] = {
NodeRelationship.SOURCE: li_doc.as_related_node_info(),
}
metadata = chunk.meta.export_json_dict()
excl_embed_keys = [
k for k in chunk.meta.excluded_embed if k in metadata
]
excl_llm_keys = [k for k in chunk.meta.excluded_llm if k in metadata]
node = TextNode(
id_=self.id_func(i=i, node=li_doc),
text=chunk.text,
excluded_embed_metadata_keys=excl_embed_keys,
excluded_llm_metadata_keys=excl_llm_keys,
relationships=rels,
)
node.metadata = metadata
all_nodes.append(node)
return all_nodes
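# Minimal usage sketch. Assumptions: a `DoclingReader` that exports documents as
# Docling JSON is available under `llama_index.readers.docling`, and the file
# path is hypothetical.
#
#     from llama_index.core import VectorStoreIndex
#     from llama_index.readers.docling import DoclingReader
#
#     reader = DoclingReader(export_type=DoclingReader.ExportType.JSON)
#     node_parser = DoclingNodeParser()
#     index = VectorStoreIndex.from_documents(
#         documents=reader.load_data(file_path="report.pdf"),
#         transformations=[node_parser],
#     )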
|
from typing import Any, Iterable, Protocol, Sequence, runtime_checkable
import uuid
from docling_core.transforms.chunker import BaseChunker, HierarchicalChunker
from docling_core.types import DoclingDocument as DLDocument
from llama_index.core.schema import Document as LIDocument
from llama_index.core.node_parser import NodeParser
from llama_index.core.schema import (
BaseNode,
NodeRelationship,
RelatedNodeType,
TextNode,
)
from llama_index.core.utils import get_tqdm_iterable
class DoclingNodeParser(NodeParser):
"""Docling format node parser.
Splits the JSON format of `DoclingReader` into nodes corresponding
to respective document elements from Docling's data model
(paragraphs, headings, tables etc.).
Args:
chunker (BaseChunker, optional): The chunker to use. Defaults to `HierarchicalChunker()`.
        id_func (NodeIDGenCallable, optional): The node ID generation function to use. Defaults to `_uuid4_node_id_gen`.
"""
@runtime_checkable
class NodeIDGenCallable(Protocol):
def __call__(self, i: int, node: BaseNode) -> str:
...
@staticmethod
def _uuid4_node_id_gen(i: int, node: BaseNode) -> str:
return str(uuid.uuid4())
chunker: BaseChunker = HierarchicalChunker()
id_func: NodeIDGenCallable = _uuid4_node_id_gen
def _parse_nodes(
self,
nodes: Sequence[BaseNode],
show_progress: bool = False,
**kwargs: Any,
) -> list[BaseNode]:
nodes_with_progress: Iterable[BaseNode] = get_tqdm_iterable(
items=nodes, show_progress=show_progress, desc="Parsing nodes"
)
all_nodes: list[BaseNode] = []
for input_node in nodes_with_progress:
li_doc = LIDocument.model_validate(input_node)
dl_doc: DLDocument = DLDocument.model_validate_json(li_doc.get_content())
chunk_iter = self.chunker.chunk(dl_doc=dl_doc)
for i, chunk in enumerate(chunk_iter):
rels: dict[NodeRelationship, RelatedNodeType] = {
NodeRelationship.SOURCE: li_doc.as_related_node_info(),
}
metadata = chunk.meta.export_json_dict()
excl_embed_keys = [
k for k in chunk.meta.excluded_embed if k in metadata
]
excl_llm_keys = [k for k in chunk.meta.excluded_llm if k in metadata]
node = TextNode(
id_=self.id_func(i=i, node=li_doc),
text=chunk.text,
excluded_embed_metadata_keys=excl_embed_keys,
excluded_llm_metadata_keys=excl_llm_keys,
relationships=rels,
)
node.metadata = metadata
all_nodes.append(node)
return all_nodes
|
"""Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_parse_string_eval_output,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_eval_chain() -> None:
"""Test a simple eval chain."""
example = {"query": "What's my name", "answer": "John Doe"}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == "foo"
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
@pytest.mark.parametrize("chain_cls", [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: type[ContextQAEvalChain]) -> None:
"""Test a simple eval chain."""
example = {
"query": "What's my name",
"context": "The name of this person is John Doe",
}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = chain_cls.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert "text" in outputs[0]
assert outputs[0]["text"] == "foo"
def test_load_criteria_evaluator() -> None:
"""Test loading a criteria evaluator."""
try:
from langchain_openai import ChatOpenAI # noqa: F401
except ImportError:
pytest.skip("langchain-openai not installed")
# Patch the env with an openai-api-key
with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
# Check it can load using a string arg (even if that's not how it's typed)
load_evaluator("criteria") # type: ignore[arg-type]
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_implements_string_evaluator_protocol(
chain_cls: type[LLMChain],
) -> None:
assert issubclass(chain_cls, StringEvaluator)
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_returns_expected_results(
chain_cls: type[LLMChain],
) -> None:
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
chain = chain_cls.from_llm(fake_llm) # type: ignore[attr-defined]
results = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert results["score"] == 1
@pytest.mark.parametrize(
"output,expected",
[
(
""" GRADE: CORRECT
QUESTION: according to the passage, what is the main reason that the author wrote this passage?
STUDENT ANSWER: to explain the importance of washing your hands
TRUE ANSWER: to explain the importance of washing your hands
GRADE:""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" Here is my step-by-step reasoning to grade the student's answer:
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
6. Therefore, the student's answer contains the same factual information as the true answer, so it should be graded as correct.
GRADE: CORRECT""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{
"value": "CORRECT",
"score": 1,
},
),
(
"""The student's answer is "Regent's Park," which matches the correct answer given in the context. Therefore, the student's answer is CORRECT.""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
],
)
def test_qa_output_parser(output: str, expected: dict) -> None:
expected["reasoning"] = output.strip()
assert _parse_string_eval_output(output) == expected
|
"""Test LLM Bash functionality."""
import os
import sys
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain.evaluation.loading import load_evaluator
from langchain.evaluation.qa.eval_chain import (
ContextQAEvalChain,
CotQAEvalChain,
QAEvalChain,
_parse_string_eval_output,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
def test_eval_chain() -> None:
"""Test a simple eval chain."""
example = {"query": "What's my name", "answer": "John Doe"}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = QAEvalChain.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert fake_qa_eval_chain.output_key in outputs[0]
assert outputs[0][fake_qa_eval_chain.output_key] == "foo"
@pytest.mark.skipif(
sys.platform.startswith("win"), reason="Test not supported on Windows"
)
@pytest.mark.parametrize("chain_cls", [ContextQAEvalChain, CotQAEvalChain])
def test_context_eval_chain(chain_cls: type[ContextQAEvalChain]) -> None:
"""Test a simple eval chain."""
example = {
"query": "What's my name",
"context": "The name of this person is John Doe",
}
prediction = {"result": "John Doe"}
fake_qa_eval_chain = chain_cls.from_llm(FakeLLM())
outputs = fake_qa_eval_chain.evaluate([example, example], [prediction, prediction])
assert outputs[0] == outputs[1]
assert "text" in outputs[0]
assert outputs[0]["text"] == "foo"
def test_load_criteria_evaluator() -> None:
"""Test loading a criteria evaluator."""
try:
from langchain_openai import ChatOpenAI # noqa: F401
except ImportError:
pytest.skip("langchain-openai not installed")
# Patch the env with an openai-api-key
with patch.dict(os.environ, {"OPENAI_API_KEY": "foo"}):
# Check it can load using a string arg (even if that's not how it's typed)
load_evaluator("criteria") # type: ignore
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_implements_string_evaluator_protocol(
chain_cls: type[LLMChain],
) -> None:
assert issubclass(chain_cls, StringEvaluator)
@pytest.mark.parametrize("chain_cls", [QAEvalChain, ContextQAEvalChain, CotQAEvalChain])
def test_returns_expected_results(
chain_cls: type[LLMChain],
) -> None:
fake_llm = FakeLLM(
queries={"text": "The meaning of life\nCORRECT"}, sequential_responses=True
)
chain = chain_cls.from_llm(fake_llm) # type: ignore
results = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert results["score"] == 1
@pytest.mark.parametrize(
"output,expected",
[
(
""" GRADE: CORRECT
QUESTION: according to the passage, what is the main reason that the author wrote this passage?
STUDENT ANSWER: to explain the importance of washing your hands
TRUE ANSWER: to explain the importance of washing your hands
GRADE:""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" Here is my step-by-step reasoning to grade the student's answer:
1. The question asks who founded the Roanoke settlement.
2. The context states that the grade incorrect answer is Walter Raleigh.
3. The student's answer is "Sir Walter Raleigh".
4. The student's answer matches the context, which states the answer is Walter Raleigh.
5. The addition of "Sir" in the student's answer does not contradict the context. It provides extra detail about Walter Raleigh's title, but the core answer of Walter Raleigh is still correct.
6. Therefore, the student's answer contains the same factual information as the true answer, so it should be graded as correct.
GRADE: CORRECT""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
(
""" CORRECT
QUESTION: who was the first president of the united states?
STUDENT ANSWER: George Washington
TRUE ANSWER: George Washington was the first president of the United States.
GRADE:""",
{
"value": "CORRECT",
"score": 1,
},
),
(
"""The student's answer is "Regent's Park," which matches the correct answer given in the context. Therefore, the student's answer is CORRECT.""", # noqa: E501
{
"value": "CORRECT",
"score": 1,
},
),
],
)
def test_qa_output_parser(output: str, expected: dict) -> None:
expected["reasoning"] = output.strip()
assert _parse_string_eval_output(output) == expected
|
from contextlib import nullcontext
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List, Optional
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed as the mean squared difference between teacher.encode(source_sentences) and student.encode(target_sentences).
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
:param truncate_dim: The dimension to truncate sentence embeddings to. `None` uses the model's current truncation
dimension. Defaults to None.
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: Optional[int] = None,
):
self.truncate_dim = truncate_dim
with nullcontext() if self.truncate_dim is None else teacher_model.truncate_sentence_embeddings(
self.truncate_dim
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
def __call__(self, model, output_path, epoch=-1, steps=-1):
if epoch != -1:
if steps == -1:
out_txt = f" after epoch {epoch}"
else:
out_txt = f" in epoch {epoch} after {steps} steps"
else:
out_txt = ""
if self.truncate_dim is not None:
out_txt += f" (truncated to {self.truncate_dim})"
with nullcontext() if self.truncate_dim is None else model.truncate_sentence_embeddings(self.truncate_dim):
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info(f"MSE evaluation (lower = better) on the {self.name} dataset{out_txt}:")
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
return -mse # Return negative score as SentenceTransformers maximizes the performance
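# Usage sketch (model names are placeholders; any SentenceTransformer will do):
#
#     from sentence_transformers import SentenceTransformer
#
#     teacher = SentenceTransformer("teacher-model-name")
#     student = SentenceTransformer("student-model-name")
#     evaluator = MSEEvaluator(
#         source_sentences=["A sentence in English"],
#         target_sentences=["Ein Satz auf Deutsch"],
#         teacher_model=teacher,
#         name="en-de",
#     )
#     score = evaluator(student, output_path=None)  # negative MSE*100, higher is better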
|
from sentence_transformers.evaluation import SentenceEvaluator
import logging
import os
import csv
from typing import List
logger = logging.getLogger(__name__)
class MSEEvaluator(SentenceEvaluator):
"""
Computes the mean squared error (x100) between the computed sentence embedding
and some target sentence embedding.
    The MSE is computed as the mean squared difference between teacher.encode(source_sentences) and student.encode(target_sentences).
For multilingual knowledge distillation (https://arxiv.org/abs/2004.09813), source_sentences are in English
and target_sentences are in a different language like German, Chinese, Spanish...
:param source_sentences: Source sentences are embedded with the teacher model
    :param target_sentences: Target sentences are embedded with the student model.
:param show_progress_bar: Show progress bar when computing embeddings
:param batch_size: Batch size to compute sentence embeddings
:param name: Name of the evaluator
:param write_csv: Write results to CSV file
"""
def __init__(
self,
source_sentences: List[str],
target_sentences: List[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
):
self.source_embeddings = teacher_model.encode(
source_sentences, show_progress_bar=show_progress_bar, batch_size=batch_size, convert_to_numpy=True
)
self.target_sentences = target_sentences
self.show_progress_bar = show_progress_bar
self.batch_size = batch_size
self.name = name
self.csv_file = "mse_evaluation_" + name + "_results.csv"
self.csv_headers = ["epoch", "steps", "MSE"]
self.write_csv = write_csv
def __call__(self, model, output_path, epoch=-1, steps=-1):
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
target_embeddings = model.encode(
self.target_sentences,
show_progress_bar=self.show_progress_bar,
batch_size=self.batch_size,
convert_to_numpy=True,
)
mse = ((self.source_embeddings - target_embeddings) ** 2).mean()
mse *= 100
logger.info("MSE evaluation (lower = better) on " + self.name + " dataset" + out_txt)
logger.info("MSE (*100):\t{:4f}".format(mse))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, newline="", mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, mse])
return -mse # Return negative score as SentenceTransformers maximizes the performance
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import string
import pytest
@pytest.fixture(scope='function')
def tmp_index_name():
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for _ in range(15))
return random_string
|
import random
import string
import pytest
@pytest.fixture(scope='function')
def tmp_index_name():
letters = string.ascii_lowercase
random_string = ''.join(random.choice(letters) for _ in range(15))
return random_string
|
from io import BytesIO
from typing import TYPE_CHECKING, List, NamedTuple, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.bytes.base_bytes import BaseBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(BaseBytes):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames: List[np.ndarray] = []
video_frames: List[np.ndarray] = []
keyframe_indices: List[int] = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
video_frames.append(frame.to_ndarray(format='rgb24'))
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, List, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='VideoBytes')
class VideoLoadResult(NamedTuple):
video: VideoNdArray
audio: AudioNdArray
key_frame_indices: NdArray
@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
"""
    Bytes that store a video and that can be loaded into a video tensor
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self, **kwargs) -> VideoLoadResult:
"""
Load the video from the bytes into a VideoLoadResult object consisting of:
- a [`VideoNdArray`][docarray.typing.VideoNdArray] (`VideoLoadResult.video`)
- an [`AudioNdArray`][docarray.typing.AudioNdArray] (`VideoLoadResult.audio`)
- an [`NdArray`][docarray.typing.NdArray] containing the key frame indices (`VideoLoadResult.key_frame_indices`).
---
```python
from docarray import BaseDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl
class MyDoc(BaseDoc):
video_url: VideoUrl
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video, audio, key_frame_indices = doc.video_url.load()
assert isinstance(video, VideoNdArray)
assert isinstance(audio, AudioNdArray)
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: a `VideoLoadResult` instance with video, audio and keyframe indices
"""
if TYPE_CHECKING:
import av
else:
av = import_library('av')
with av.open(BytesIO(self), **kwargs) as container:
audio_frames: List[np.ndarray] = []
video_frames: List[np.ndarray] = []
keyframe_indices: List[int] = []
for frame in container.decode():
if type(frame) == av.audio.frame.AudioFrame:
audio_frames.append(frame.to_ndarray())
elif type(frame) == av.video.frame.VideoFrame:
if frame.key_frame == 1:
curr_index = len(video_frames)
keyframe_indices.append(curr_index)
video_frames.append(frame.to_ndarray(format='rgb24'))
if len(audio_frames) == 0:
audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
else:
audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))
video = parse_obj_as(VideoNdArray, np.stack(video_frames))
indices = parse_obj_as(NdArray, keyframe_indices)
return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
] # yapf: disable
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
"""
Elasticsearch (or OpenSearch) reader over the REST API.
This only uses the basic search API, so it works with both Elasticsearch and OpenSearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class ElasticsearchReader(BasePydanticReader):
"""
Read documents from an Elasticsearch/Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
endpoint (str): URL (http/https) of cluster
index (str): Name of the index (required)
httpx_client_args (dict): Optional additional args to pass to the `httpx.Client`
"""
is_remote: bool = True
endpoint: str
index: str
httpx_client_args: Optional[dict] = None
_client: Any = PrivateAttr()
def __init__(
self, endpoint: str, index: str, httpx_client_args: Optional[dict] = None
):
"""Initialize with parameters."""
super().__init__(
endpoint=endpoint, index=index, httpx_client_args=httpx_client_args
)
import_err_msg = """
`httpx` package not found. Install via `pip install httpx`
"""
try:
import httpx
except ImportError:
raise ImportError(import_err_msg)
self._client = httpx.Client(base_url=endpoint, **(httpx_client_args or {}))
@classmethod
def class_name(cls) -> str:
return "ElasticsearchReader"
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
metadata_fields: Optional[List[str]] = None,
) -> List[Document]:
"""
Read data from the Elasticsearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Elasticsearch JSON query DSL object.
For example:
{"query": {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
metadata_fields (Optional[List[str]]): Fields used as metadata. Default
is all fields in the document except those specified by the
field and embedding_field parameters.
Returns:
List[Document]: A list of documents.
"""
res = self._client.post(f"{self.index}/_search", json=query).json()
documents = []
for hit in res["hits"]["hits"]:
doc_id = hit["_id"]
value = hit["_source"][field]
embedding = hit["_source"].get(embedding_field or "", None)
if metadata_fields:
metadata = {
k: v for k, v in hit["_source"].items() if k in metadata_fields
}
else:
hit["_source"].pop(field)
hit["_source"].pop(embedding_field or "", None)
metadata = hit["_source"]
documents.append(
Document(id_=doc_id, text=value, metadata=metadata, embedding=embedding)
)
return documents
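# Usage sketch (endpoint, index, field and metadata names are placeholders):
#
#     reader = ElasticsearchReader(
#         endpoint="http://localhost:9200",
#         index="my-index",
#     )
#     docs = reader.load_data(
#         field="message",
#         query={"query": {"match_all": {}}},
#         metadata_fields=["timestamp"],
#     )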
|
"""
Elasticsearch (or OpenSearch) reader over the REST API.
This only uses the basic search API, so it works with both Elasticsearch and OpenSearch.
"""
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import PrivateAttr
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class ElasticsearchReader(BasePydanticReader):
"""
Read documents from an Elasticsearch/Opensearch index.
These documents can then be used in a downstream Llama Index data structure.
Args:
endpoint (str): URL (http/https) of cluster
index (str): Name of the index (required)
httpx_client_args (dict): Optional additional args to pass to the `httpx.Client`
"""
is_remote: bool = True
endpoint: str
index: str
httpx_client_args: Optional[dict] = None
_client: Any = PrivateAttr()
def __init__(
self, endpoint: str, index: str, httpx_client_args: Optional[dict] = None
):
"""Initialize with parameters."""
super().__init__(
endpoint=endpoint, index=index, httpx_client_args=httpx_client_args
)
import_err_msg = """
`httpx` package not found. Install via `pip install httpx`
"""
try:
import httpx
except ImportError:
raise ImportError(import_err_msg)
self._client = httpx.Client(base_url=endpoint, **(httpx_client_args or {}))
@classmethod
def class_name(cls) -> str:
return "ElasticsearchReader"
def load_data(
self,
field: str,
query: Optional[dict] = None,
embedding_field: Optional[str] = None,
metadata_fields: Optional[List[str]] = None,
) -> List[Document]:
"""
Read data from the Elasticsearch index.
Args:
field (str): Field in the document to retrieve text from
query (Optional[dict]): Elasticsearch JSON query DSL object.
For example:
{"query": {"match": {"message": {"query": "this is a test"}}}}
embedding_field (Optional[str]): If there are embeddings stored in
this index, this field can be used
to set the embedding field on the returned Document list.
metadata_fields (Optional[List[str]]): Fields used as metadata. Default
is all fields in the document except those specified by the
field and embedding_field parameters.
Returns:
List[Document]: A list of documents.
"""
res = self._client.post(f"{self.index}/_search", json=query).json()
documents = []
for hit in res["hits"]["hits"]:
doc_id = hit["_id"]
value = hit["_source"][field]
embedding = hit["_source"].get(embedding_field or "", None)
if metadata_fields:
metadata = {
k: v for k, v in hit["_source"].items() if k in metadata_fields
}
else:
hit["_source"].pop(field)
hit["_source"].pop(embedding_field or "", None)
metadata = hit["_source"]
documents.append(
Document(id_=doc_id, text=value, metadata=metadata, embedding=embedding)
)
return documents
|
import argparse
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.servers import BaseServer
try:
total_time = 0
total_success = 0
timeout = args.timeout / 1000 if args.timeout != -1 else None
for j in range(args.attempts):
with TimeContext(
                    f'ping {args.target} on {args.host}, round {j}', default_logger
) as tc:
if args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=timeout)
else:
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = BaseServer.is_ready(
ctrl_address=f'{hostname}:{port}',
timeout=timeout,
protocol=protocol,
)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
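# Usage sketch (attribute names inferred from the accesses above; the values
# are placeholders):
#
#     args = argparse.Namespace(
#         host='grpc://localhost:54321',
#         target='flow',
#         timeout=3000,  # in milliseconds, -1 disables the timeout
#         attempts=5,
#         min_successful_attempts=1,
#     )
#     NetworkChecker(args)  # exits 0 if the target is ready, 1 otherwise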
|
import argparse
from jina.enums import ProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a Deployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina.clients import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.servers import BaseServer
try:
total_time = 0
total_success = 0
timeout = args.timeout / 1000 if args.timeout != -1 else None
for j in range(args.attempts):
with TimeContext(
                    f'ping {args.target} on {args.host}, round {j}', default_logger
) as tc:
if args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=timeout)
else:
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = BaseServer.is_ready(
ctrl_address=f'{hostname}:{port}',
timeout=timeout,
protocol=protocol,
)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.debug(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.debug(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.debug(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.debug(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
# returns 1 (anomaly) when it comes to here
exit(1)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.5'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information contains major, minor, micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
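# For example:
#   parse_version_info('0.10.5')  -> (0, 10, 5)
#   parse_version_info('1.0rc1')  -> (1, 0, 'rc1')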
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
    Returns:
        tuple: version information containing the major, minor and micro version numbers.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
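# Note (an assumption inferred from the evaluator usage below, not from the dataset card):
# each split exposes the three text columns TripletLoss consumes, e.g.
#   train_dataset[0] -> {"anchor": "...", "positive": "...", "negative": "..."}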
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia-Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
|
"""
This script trains sentence transformers with a triplet loss function.
As corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
from sentence_transformers.losses import TripletLoss
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 1
output_dir = "output/training-wikipedia-sections-" + model_name + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the Wikipedia-Sections dataset: https://huggingface.co/datasets/sentence-transformers/wikipedia-sections
train_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="train").select(
range(10_000)
)
eval_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="validation").select(
range(1000)
)
test_dataset = load_dataset("sentence-transformers/wikipedia-sections", "triplet", split="test").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss
# TripletLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#tripletloss) needs three text columns
train_loss = TripletLoss(model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = TripletEvaluator(
anchors=eval_dataset[:1000]["anchor"],
positives=eval_dataset[:1000]["positive"],
negatives=eval_dataset[:1000]["negative"],
name="wikipedia-sections-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="wikipedia-sections-triplet", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the Wikipedia-Sections test dataset
test_evaluator = TripletEvaluator(
anchors=test_dataset["anchor"],
positives=test_dataset["positive"],
negatives=test_dataset["negative"],
name="wikipedia-sections-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-wikipedia-sections-triplet")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-wikipedia-sections-triplet')`."
)
|
__version__ = '0.13.14'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.13'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_NO_RICH_HANDLER' not in os.environ:
from rich.traceback import install
install()
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [
curr_path.parents[1],
curr_path.parents[0] / "bin",
curr_path.parents[0] / "lib",
]
if system() in ("Windows", "Microsoft"):
dll_path.append(curr_path.parents[1] / "Release")
dll_path.append(curr_path.parents[1] / "windows" / "x64" / "DLL")
dll_path = [p / "lib_lightgbm.dll" for p in dll_path]
else:
dll_path = [p / "lib_lightgbm.so" for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = "\n".join(map(str, dll_path))
raise Exception(f"Cannot find lightgbm library file in following paths:\n{dll_path_joined}")
return lib_path
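# Hedged usage sketch: on a working install this returns a single-element list such as
# ['/.../site-packages/lightgbm/lib_lightgbm.so'] on Linux; the exact location depends
# on how the package was built.
if __name__ == "__main__":
    print(find_lib_path())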
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [curr_path.parents[1],
curr_path.parents[0] / 'bin',
curr_path.parents[0] / 'lib']
if system() in ('Windows', 'Microsoft'):
dll_path.append(curr_path.parents[1] / 'Release')
dll_path.append(curr_path.parents[1] / 'windows' / 'x64' / 'DLL')
dll_path = [p / 'lib_lightgbm.dll' for p in dll_path]
else:
dll_path = [p / 'lib_lightgbm.so' for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = '\n'.join(map(str, dll_path))
raise Exception(f'Cannot find lightgbm library file in following paths:\n{dll_path_joined}')
return lib_path
|
from dataclasses import dataclass, fields
import numpy as np
import pytest
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.pipeline import Pipeline
from sklearn.utils import (
Tags,
get_tags,
)
from sklearn.utils.estimator_checks import (
check_estimator_tags_renamed,
check_valid_tag_types,
)
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class EmptyTransformer(TransformerMixin, BaseEstimator):
pass
class EmptyRegressor(RegressorMixin, BaseEstimator):
pass
# TODO(1.8): Update when implementing __sklearn_tags__ is required
@pytest.mark.filterwarnings(
"ignore:.*no attribute '__sklearn_tags__'.*:DeprecationWarning"
)
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[EmptyTransformer(), False],
[EmptyRegressor(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
def test_no___sklearn_tags__with_more_tags():
"""Test that calling `get_tags` on a class that defines `_more_tags` but not
`__sklearn_tags__` raises an error.
"""
class MoreTagsEstimator(BaseEstimator):
def _more_tags(self):
return {"requires_y": True} # pragma: no cover
with pytest.raises(
TypeError, match="has defined either `_more_tags` or `_get_tags`"
):
check_estimator_tags_renamed("MoreTagsEstimator", MoreTagsEstimator())
def test_tag_test_passes_with_inheritance():
@dataclass
class MyTags(Tags):
my_tag: bool = True # type: ignore[annotation-unchecked]
class MyEstimator(BaseEstimator):
def __sklearn_tags__(self):
tags_orig = super().__sklearn_tags__()
as_dict = {
field.name: getattr(tags_orig, field.name)
for field in fields(tags_orig)
}
tags = MyTags(**as_dict)
tags.my_tag = True
return tags
check_valid_tag_types("MyEstimator", MyEstimator())
# TODO(1.8): Update this test to check for errors
def test_tags_no_sklearn_tags_concrete_implementation():
"""Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/30479
    Either the estimator doesn't implement `__sklearn_tags__` or there is no class
    implementing `__sklearn_tags__` without calling `super().__sklearn_tags__()` in
    its mro. Thus, we raise a warning and request that the estimator inherit from
    `BaseEstimator`, which implements `__sklearn_tags__`.
"""
X = np.array([[1, 2], [2, 3], [3, 4]])
y = np.array([1, 0, 1])
# 1st case, the estimator inherits from a class that only implements
# `__sklearn_tags__` by calling `super().__sklearn_tags__()`.
class MyEstimator(ClassifierMixin):
def __init__(self, *, param=1):
self.param = param
def fit(self, X, y=None):
self.is_fitted_ = True
return self
def predict(self, X):
return np.full(shape=X.shape[0], fill_value=self.param)
my_pipeline = Pipeline([("estimator", MyEstimator(param=1))])
with pytest.warns(DeprecationWarning, match="The following error was raised"):
my_pipeline.fit(X, y).predict(X)
# 2nd case, the estimator doesn't implement `__sklearn_tags__` at all.
class MyEstimator2:
def __init__(self, *, param=1):
self.param = param
def fit(self, X, y=None):
self.is_fitted_ = True
return self
def predict(self, X):
return np.full(shape=X.shape[0], fill_value=self.param)
my_pipeline = Pipeline([("estimator", MyEstimator2(param=1))])
with pytest.warns(DeprecationWarning, match="The following error was raised"):
my_pipeline.fit(X, y).predict(X)
    # check that we still raise an error if it is not an AttributeError or related to
    # __sklearn_tags__
class MyEstimator3(MyEstimator, BaseEstimator):
def __init__(self, *, param=1, error_type=AttributeError):
self.param = param
self.error_type = error_type
def __sklearn_tags__(self):
super().__sklearn_tags__()
raise self.error_type("test")
for error_type in (AttributeError, TypeError, ValueError):
estimator = MyEstimator3(param=1, error_type=error_type)
with pytest.raises(error_type):
get_tags(estimator)
|
from dataclasses import dataclass, fields
import numpy as np
import pytest
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
)
from sklearn.pipeline import Pipeline
from sklearn.utils import (
Tags,
get_tags,
)
from sklearn.utils.estimator_checks import (
check_estimator_tags_renamed,
check_valid_tag_types,
)
class NoTagsEstimator:
pass
class ClassifierEstimator:
# This is to test whether not inheriting from mixins works.
_estimator_type = "classifier"
class EmptyTransformer(TransformerMixin, BaseEstimator):
pass
class EmptyRegressor(RegressorMixin, BaseEstimator):
pass
# TODO(1.8): Update when implementing __sklearn_tags__ is required
@pytest.mark.filterwarnings(
"ignore:.*no attribute '__sklearn_tags__'.*:DeprecationWarning"
)
@pytest.mark.parametrize(
"estimator, value",
[
[NoTagsEstimator(), False],
[ClassifierEstimator(), True],
[EmptyTransformer(), False],
[EmptyRegressor(), True],
[BaseEstimator(), False],
],
)
def test_requires_y(estimator, value):
assert get_tags(estimator).target_tags.required == value
def test_no___sklearn_tags__with_more_tags():
"""Test that calling `get_tags` on a class that defines `_more_tags` but not
`__sklearn_tags__` raises an error.
"""
class MoreTagsEstimator(BaseEstimator):
def _more_tags(self):
return {"requires_y": True} # pragma: no cover
with pytest.raises(
TypeError, match="has defined either `_more_tags` or `_get_tags`"
):
check_estimator_tags_renamed("MoreTagsEstimator", MoreTagsEstimator())
def test_tag_test_passes_with_inheritance():
@dataclass
class MyTags(Tags):
my_tag: bool = True
class MyEstimator(BaseEstimator):
def __sklearn_tags__(self):
tags_orig = super().__sklearn_tags__()
as_dict = {
field.name: getattr(tags_orig, field.name)
for field in fields(tags_orig)
}
tags = MyTags(**as_dict)
tags.my_tag = True
return tags
check_valid_tag_types("MyEstimator", MyEstimator())
# TODO(1.8): Update this test to check for errors
def test_tags_no_sklearn_tags_concrete_implementation():
"""Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/30479
    Either the estimator doesn't implement `__sklearn_tags__` or there is no class
    implementing `__sklearn_tags__` without calling `super().__sklearn_tags__()` in
    its mro. Thus, we raise a warning and request that the estimator inherit from
    `BaseEstimator`, which implements `__sklearn_tags__`.
"""
X = np.array([[1, 2], [2, 3], [3, 4]])
y = np.array([1, 0, 1])
# 1st case, the estimator inherits from a class that only implements
# `__sklearn_tags__` by calling `super().__sklearn_tags__()`.
class MyEstimator(ClassifierMixin):
def __init__(self, *, param=1):
self.param = param
def fit(self, X, y=None):
self.is_fitted_ = True
return self
def predict(self, X):
return np.full(shape=X.shape[0], fill_value=self.param)
my_pipeline = Pipeline([("estimator", MyEstimator(param=1))])
with pytest.warns(DeprecationWarning, match="The following error was raised"):
my_pipeline.fit(X, y).predict(X)
# 2nd case, the estimator doesn't implement `__sklearn_tags__` at all.
class MyEstimator2:
def __init__(self, *, param=1):
self.param = param
def fit(self, X, y=None):
self.is_fitted_ = True
return self
def predict(self, X):
return np.full(shape=X.shape[0], fill_value=self.param)
my_pipeline = Pipeline([("estimator", MyEstimator2(param=1))])
with pytest.warns(DeprecationWarning, match="The following error was raised"):
my_pipeline.fit(X, y).predict(X)
    # check that we still raise an error if it is not an AttributeError or related to
    # __sklearn_tags__
class MyEstimator3(MyEstimator, BaseEstimator):
def __init__(self, *, param=1, error_type=AttributeError):
self.param = param
self.error_type = error_type
def __sklearn_tags__(self):
super().__sklearn_tags__()
raise self.error_type("test")
for error_type in (AttributeError, TypeError, ValueError):
estimator = MyEstimator3(param=1, error_type=error_type)
with pytest.raises(error_type):
get_tags(estimator)
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
[HDEMUCS_HIGH_MUSDB, "music_separation", 2, 8.0697],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
    The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
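# Background note (not part of the test): for an estimate s_hat and reference s,
#     Si-SDR = 10 * log10(||alpha * s||^2 / ||alpha * s - s_hat||^2),
#     alpha  = <s_hat, s> / ||s||^2,
# and the `sdr` helper imported from the examples folder is assumed to implement
# this scale-invariant variant.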
|
import os
import sys
import pytest
import torch
import torchaudio
from torchaudio.prototype.pipelines import CONVTASNET_BASE_LIBRI2MIX, HDEMUCS_HIGH_MUSDB_PLUS
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "examples"))
from source_separation.utils.metrics import sdr
@pytest.mark.parametrize(
"bundle,task,channel,expected_score",
[
[CONVTASNET_BASE_LIBRI2MIX, "speech_separation", 1, 8.1373],
[HDEMUCS_HIGH_MUSDB_PLUS, "music_separation", 2, 8.7480],
],
)
def test_source_separation_models(bundle, task, channel, expected_score, mixture_source, clean_sources):
"""Integration test for the source separation pipeline.
Given the mixture waveform with dimensions `(batch, channel, time)`, the pre-trained pipeline generates
the separated sources Tensor with dimensions `(batch, num_sources, time)`.
    The test computes the scale-invariant signal-to-distortion ratio (Si-SDR) score in decibels (dB).
Si-SDR score should be equal to or larger than the expected score.
"""
model = bundle.get_model()
mixture_waveform, sample_rate = torchaudio.load(mixture_source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms = []
for source in clean_sources:
clean_waveform, sample_rate = torchaudio.load(source)
assert sample_rate == bundle.sample_rate, "The sample rate of audio must match that in the bundle."
clean_waveforms.append(clean_waveform)
mixture_waveform = mixture_waveform.reshape(1, channel, -1)
estimated_sources = model(mixture_waveform)
clean_waveforms = torch.cat(clean_waveforms).unsqueeze(0)
estimated_sources = estimated_sources.reshape(1, -1, clean_waveforms.shape[-1])
sdr_values = sdr(estimated_sources, clean_waveforms).mean()
assert sdr_values >= expected_score
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""
This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
            collect([type(rel), endNode(rel).id]) AS flattened_rels
            UNWIND flattened_rels AS fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
prepared_statement = query % (
self.node_label.replace("`", ""),
self.node_label.replace("`", ""),
rel.replace(" ", "_").replace("`", "").upper(),
)
self.query(
prepared_statement,
{"subj": subj.replace("`", ""), "obj": obj.replace("`", "")},
)
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
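# Illustrative shape of get_rel_map() output (inferred from the Cypher query above):
#   {"Alice": [["KNOWS", "Bob"], ["WORKS_AT", "Acme"]], ...}
# i.e. one flattened [relationship_type, object_id] pair per relationship reached
# within `depth` hops of each subject.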
|
from abc import abstractmethod
import logging
from typing import Any, Dict, List, Optional
from llama_index.core.graph_stores.types import GraphStore
from .neptune import refresh_schema
logger = logging.getLogger(__name__)
class NeptuneBaseGraphStore(GraphStore):
"""This is an abstract base class that represents the shared features across the NeptuneDatabaseGraphStore
and NeptuneAnalyticsGraphStore classes.
"""
    def __init__(self) -> None:
pass
@property
def client(self) -> Any:
return self._client
def get(self, subj: str) -> List[List[str]]:
"""Get triplets."""
query = """
MATCH (n1:%s)-[r]->(n2:%s)
WHERE n1.id = $subj
RETURN type(r), n2.id;
"""
prepared_statement = query % (self.node_label, self.node_label)
with self._driver.session(database=self._database) as session:
data = session.run(prepared_statement, {"subj": subj})
return [record.values() for record in data]
def get_rel_map(
self, subjs: Optional[List[str]] = None, depth: int = 2, limit: int = 30
) -> Dict[str, List[List[str]]]:
"""Get flat rel map."""
rel_map: Dict[Any, List[Any]] = {}
if subjs is None or len(subjs) == 0:
return rel_map
query = f"""MATCH p=(n1:{self.node_label})-[*1..{depth}]->() WHERE n1.id IN $subjs
UNWIND relationships(p) AS rel WITH n1.id AS subj, p,
            collect([type(rel), endNode(rel).id]) AS flattened_rels
            UNWIND flattened_rels AS fr
WITH DISTINCT fr, subj
RETURN subj, collect(fr) AS flattened_rels LIMIT {limit}"""
data = list(self.query(query, {"subjs": subjs}))
if not data:
return rel_map
for record in data:
rel_map[record["subj"]] = record["flattened_rels"]
return rel_map
def upsert_triplet(self, subj: str, rel: str, obj: str) -> None:
"""Add triplet to the graph."""
query = """
MERGE (n1:`%s` {id:$subj})
MERGE (n2:`%s` {id:$obj})
MERGE (n1)-[:`%s`]->(n2)
"""
prepared_statement = query % (
self.node_label.replace("`", ""),
self.node_label.replace("`", ""),
rel.replace(" ", "_").replace("`", "").upper(),
)
self.query(
prepared_statement,
{"subj": subj.replace("`", ""), "obj": obj.replace("`", "")},
)
def delete(self, subj: str, rel: str, obj: str) -> None:
"""Delete triplet from the graph."""
def delete_rel(subj: str, obj: str, rel: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
(
"MATCH (n1:{})-[r:{}]->(n2:{}) WHERE n1.id = $subj AND n2.id"
" = $obj DELETE r"
).format(self.node_label, rel, self.node_label),
{"subj": subj, "obj": obj},
)
def delete_entity(entity: str) -> None:
with self._driver.session(database=self._database) as session:
session.run(
"MATCH (n:%s) WHERE n.id = $entity DETACH DELETE n"
% self.node_label,
{"entity": entity},
)
delete_rel(subj, obj, rel)
delete_entity(subj)
def get_schema(self, refresh: bool = False) -> str:
"""Get the schema of the Neptune KG store."""
if refresh or not self.schema:
self.schema = refresh_schema(self.query, self._get_summary())["schema_str"]
return self.schema
@abstractmethod
def query(self, query: str, params: dict = {}) -> Dict[str, Any]:
raise NotImplementedError
@abstractmethod
def _get_summary(self) -> Dict:
raise NotImplementedError
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, Series
PANDAS_INSTALLED = True
except ImportError:
DataFrame = object
Series = object
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
from pandas import concat as pd_concat
return pd_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
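# Minimal sketch of the NumPy branch of concat(); the other branches require the
# optional dependencies guarded above.
if __name__ == "__main__":
    assert concat([np.ones((2, 3)), np.zeros((1, 3))]).shape == (3, 3)  # row-wise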
|
# pylint: disable=invalid-name,unused-import
"""For compatibility and optional dependencies."""
import importlib.util
import logging
import sys
import types
from typing import Any, Sequence, cast
import numpy as np
from ._typing import _T
assert sys.version_info[0] == 3, "Python 2 is no longer supported."
def py_str(x: bytes | None) -> str:
"""convert c string back to python string"""
assert x is not None # ctypes might return None
return x.decode("utf-8") # type: ignore
def lazy_isinstance(instance: Any, module: str, name: str) -> bool:
"""Use string representation to identify a type."""
# Notice, we use .__class__ as opposed to type() in order
# to support object proxies such as weakref.proxy
cls = instance.__class__
is_same_module = cls.__module__ == module
has_same_name = cls.__name__ == name
return is_same_module and has_same_name
# pandas
try:
from pandas import DataFrame, MultiIndex, Series
from pandas import concat as pandas_concat
PANDAS_INSTALLED = True
except ImportError:
MultiIndex = object
DataFrame = object
Series = object
pandas_concat = None
PANDAS_INSTALLED = False
# sklearn
try:
from sklearn.base import BaseEstimator as XGBModelBase
from sklearn.base import ClassifierMixin as XGBClassifierBase
from sklearn.base import RegressorMixin as XGBRegressorBase
from sklearn.preprocessing import LabelEncoder
try:
from sklearn.model_selection import KFold as XGBKFold
from sklearn.model_selection import StratifiedKFold as XGBStratifiedKFold
except ImportError:
from sklearn.cross_validation import KFold as XGBKFold
from sklearn.cross_validation import StratifiedKFold as XGBStratifiedKFold
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
# used for compatibility without sklearn
XGBModelBase = object
XGBClassifierBase = object
XGBRegressorBase = object
LabelEncoder = object
XGBKFold = None
XGBStratifiedKFold = None
_logger = logging.getLogger(__name__)
def is_cudf_available() -> bool:
"""Check cuDF package available or not"""
if importlib.util.find_spec("cudf") is None:
return False
try:
import cudf
return True
except ImportError:
_logger.exception("Importing cuDF failed, use DMatrix instead of QDM")
return False
def is_cupy_available() -> bool:
"""Check cupy package available or not"""
if importlib.util.find_spec("cupy") is None:
return False
try:
import cupy
return True
except ImportError:
return False
def import_cupy() -> types.ModuleType:
"""Import cupy."""
if not is_cupy_available():
raise ImportError("`cupy` is required for handling CUDA buffer.")
import cupy # pylint: disable=import-error
return cupy
try:
import scipy.sparse as scipy_sparse
from scipy.sparse import csr_matrix as scipy_csr
except ImportError:
scipy_sparse = False
scipy_csr = object
def concat(value: Sequence[_T]) -> _T: # pylint: disable=too-many-return-statements
"""Concatenate row-wise."""
if isinstance(value[0], np.ndarray):
value_arr = cast(Sequence[np.ndarray], value)
return np.concatenate(value_arr, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.csr_matrix):
return scipy_sparse.vstack(value, format="csr")
if scipy_sparse and isinstance(value[0], scipy_sparse.csc_matrix):
return scipy_sparse.vstack(value, format="csc")
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
# other sparse format will be converted to CSR.
return scipy_sparse.vstack(value, format="csr")
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], "cudf.core.dataframe", "DataFrame") or lazy_isinstance(
value[0], "cudf.core.series", "Series"
):
from cudf import concat as CUDF_concat # pylint: disable=import-error
return CUDF_concat(value, axis=0)
from .data import _is_cupy_alike
if _is_cupy_alike(value[0]):
import cupy # pylint: disable=import-error
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
arr = cast(cupy.ndarray, v)
d_v = arr.device.id
assert d_v == d, "Concatenating arrays on different devices."
return cupy.concatenate(value, axis=0)
raise TypeError("Unknown type.")
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # Since the current generation of NPUs (Ascend 910) only supports
    # mixed precision training, we turn on mixed precision by default
    # on the NPU so that training runs as expected.
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
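# Hedged usage sketch (config keys follow the docstring above; the values are
# placeholders, and `model` is any nn.Module):
#   cfg = dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01, momentum=0.9))
#   optim_wrapper = build_optim_wrapper(model, cfg)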
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # Since the current generation of NPUs (Ascend 910) only supports
    # mixed precision training, we turn on mixed precision by default
    # on the NPU so that training runs as expected.
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
import json
import os
import pytest
from jina import __version__
from jina.hubble import HubExecutor
from jina.hubble.hubio import HubIO
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
def test_get_image_name(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_fetch(
name,
tag,
image_required=True,
rebuild_image=True,
*,
secret=None,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
uses = 'jinahub://DummyExecutor'
image_name = get_image_name(uses)
assert image_name == 'jinahub/DummyExecutor'
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
import json
import os
import pytest
from jina import __version__
from jina.hubble import HubExecutor
from jina.hubble.hubio import HubIO
from jina.orchestrate.deployments.config.helper import (
get_base_executor_version,
get_image_name,
to_compatible_name,
)
@pytest.mark.parametrize('is_master', (True, False))
def test_version(is_master, requests_mock):
if is_master:
count = 0
else:
# current version is published already
count = 3
requests_mock.get(
'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags',
text=json.dumps(
{
'count': count,
'next': 'abc',
'previous': 'def',
'results': [{'a': 'b', 'c': 'd'}],
}
),
)
v = get_base_executor_version()
if is_master:
assert v == 'master'
else:
assert v == __version__
def test_to_compatible_name():
assert to_compatible_name('executor/hey-ha_HO') == 'executor-hey-ha-ho'
def test_get_image_name(mocker, monkeypatch):
mock = mocker.Mock()
def _mock_fetch(
name,
tag=None,
secret=None,
image_required=True,
rebuild_image=True,
force=False,
):
mock(name=name, rebuild_image=rebuild_image)
return (
HubExecutor(
uuid='hello',
name=name,
tag='v0',
image_name=f'jinahub/{name}',
md5sum=None,
visibility=True,
archive_url=None,
),
False,
)
monkeypatch.setattr(HubIO, 'fetch_meta', _mock_fetch)
uses = 'jinahub://DummyExecutor'
image_name = get_image_name(uses)
assert image_name == 'jinahub/DummyExecutor'
_, mock_kwargs = mock.call_args_list[0]
assert mock_kwargs['rebuild_image'] is True # default value must be True
os.environ['JINA_HUB_NO_IMAGE_REBUILD'] = '1'
get_image_name(uses)
del os.environ['JINA_HUB_NO_IMAGE_REBUILD']
_, mock_kwargs = mock.call_args_list[1]
assert mock_kwargs['rebuild_image'] is False # env var is set, so it must be False
|
from langchain_core.documents import BaseDocumentTransformer, Document
__all__ = ["BaseDocumentTransformer", "Document"]
|
from langchain_core.documents import BaseDocumentTransformer, Document
__all__ = ["Document", "BaseDocumentTransformer"]
|
import logging
import os
import torch
from torchaudio._internal import download_url_to_file, module_utils as _mod_utils
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
# By default, dp issues DEBUG level log.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
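# Hedged usage sketch (the checkpoint file name is an assumption, not defined here):
#   phonemizer = _load_phonemizer("en_us_cmudict_forward.pt", dl_kwargs=None)
#   phonemizer("Hello world", lang="en_us")  # -> phoneme string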
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
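# Quick sanity check (illustrative): with bits=8 the mapping is
#   -1.0 -> 0,   0.0 -> 127 (127.5 truncated by .int()),   1.0 -> 255.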
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2**8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
|
import logging
import os
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
# By default, dp issues DEBUG level log.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2**8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
|
def list_all_runtimes():
"""List all public runtimes that can be used directly with :class:`jina.orchestrate.pods.Pod`
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.worker import WorkerRuntime
return [
k
for k, s in locals().items()
if isinstance(s, type) and issubclass(s, BaseRuntime)
]
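# Illustrative result for this variant (only names imported inside the function can
# appear, since the comprehension inspects locals()):
#   list_all_runtimes() -> ['BaseRuntime', 'WorkerRuntime']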
def get_runtime(name: str):
"""Get a public runtime by its name
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.head import HeadRuntime
from jina.serve.runtimes.worker import WorkerRuntime
s = locals()[name]
if isinstance(s, type) and issubclass(s, BaseRuntime):
return s
else:
        raise TypeError(f'{s!r} is not a subclass of {BaseRuntime!r}')
|
def list_all_runtimes():
"""List all public runtimes that can be used directly with :class:`jina.orchestrate.pods.Pod`
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
return [
k
for k, s in locals().items()
if isinstance(s, type) and issubclass(s, BaseRuntime)
]
def get_runtime(name: str):
"""Get a public runtime by its name
# noqa: DAR101
# noqa: DAR201
"""
from jina.serve.runtimes.base import BaseRuntime
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime
from jina.serve.runtimes.gateway.http import HTTPGatewayRuntime
from jina.serve.runtimes.gateway.websocket import WebSocketGatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.runtimes.head import HeadRuntime
s = locals()[name]
if isinstance(s, type) and issubclass(s, BaseRuntime):
return s
else:
        raise TypeError(f'{s!r} is not a subclass of {BaseRuntime!r}')
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
return super().__init__(
sentences1=sentences1,
sentences2=sentences2,
scores=scores,
batch_size=batch_size,
main_similarity=main_similarity,
similarity_fn_names=similarity_fn_names,
name=name,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
precision=precision,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self.name, metrics, epoch=epoch, step=step)
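# Hedged usage sketch (all values are placeholders, not from this file):
#   evaluator = SparseEmbeddingSimilarityEvaluator(
#       sentences1=["A man eats food."],
#       sentences2=["A man is eating."],
#       scores=[0.9],
#       name="toy-dev",
#   )
#   results = evaluator(sparse_encoder)  # dict of correlation metrics per similarity fn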
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseEmbeddingSimilarityEvaluator(EmbeddingSimilarityEvaluator):
def __init__(
self,
sentences1: list[str],
sentences2: list[str],
scores: list[float],
batch_size: int = 16,
main_similarity: str | SimilarityFunction | None = None,
similarity_fn_names: list[Literal["cosine", "euclidean", "manhattan", "dot"]] | None = None,
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
precision: Literal["float32", "int8", "uint8", "binary", "ubinary"] | None = None,
truncate_dim: int | None = None,
):
        super().__init__(
sentences1=sentences1,
sentences2=sentences2,
scores=scores,
batch_size=batch_size,
main_similarity=main_similarity,
similarity_fn_names=similarity_fn_names,
name=name,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
precision=precision,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
precision=self.precision,
normalize_embeddings=bool(self.precision),
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self.name, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        # duplicate each augmentation's metas once per branch
        trident_img_metas = [per_aug_metas * num_branch for per_aug_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
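# The "copy the batch_data_samples" methods above rely on plain Python list
# repetition: `samples * num_branch` concatenates `num_branch` copies, so each
# branch sees the same batch. A minimal sketch of that semantics:
if __name__ == '__main__':
    batch_data_samples = ['sample_a', 'sample_b']  # stand-ins for real data samples
    num_branch = 3
    print(batch_data_samples * num_branch)
    # ['sample_a', 'sample_b', 'sample_a', 'sample_b', 'sample_a', 'sample_b']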
|
# Copyright (c) OpenMMLab. All rights reserved.
from torch import Tensor
from mmdet.data_elements import SampleList
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .faster_rcnn import FasterRCNN
@MODELS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone: ConfigType,
rpn_head: ConfigType,
roi_head: ConfigType,
train_cfg: ConfigType,
test_cfg: ConfigType,
neck: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def _forward(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> tuple:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super()._forward(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def loss(self, batch_inputs: Tensor,
batch_data_samples: SampleList) -> dict:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().loss(
batch_inputs=batch_inputs, batch_data_samples=trident_data_samples)
def predict(self,
batch_inputs: Tensor,
batch_data_samples: SampleList,
rescale: bool = True) -> SampleList:
"""copy the ``batch_data_samples`` to fit multi-branch."""
num_branch = self.num_branch \
if self.training or self.test_branch_idx == -1 else 1
trident_data_samples = batch_data_samples * num_branch
return super().predict(
batch_inputs=batch_inputs,
batch_data_samples=trident_data_samples,
rescale=rescale)
# TODO need to refactor
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        # duplicate each augmentation's metas once per branch
        trident_img_metas = [per_aug_metas * num_branch for per_aug_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
backend_empty_cache,
backend_reset_max_memory_allocated,
backend_reset_peak_memory_stats,
load_numpy,
require_accelerator,
require_hf_hub_version_greater,
require_torch_accelerator,
require_transformers_version_greater,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_hf_hub_version_greater("0.26.5")
@require_transformers_version_greater("4.47.1")
def test_save_load_dduf(self):
super().test_save_load_dduf(atol=1e-2, rtol=1e-2)
@unittest.skip("Functionality is tested elsewhere.")
def test_save_load_optional_components(self):
pass
@slow
@require_torch_accelerator
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload(device=torch_device)
backend_reset_max_memory_allocated(torch_device)
backend_empty_cache(torch_device)
backend_reset_peak_memory_stats(torch_device)
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import (
IFPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
backend_empty_cache,
backend_reset_max_memory_allocated,
backend_reset_peak_memory_stats,
load_numpy,
require_accelerator,
require_hf_hub_version_greater,
require_torch_accelerator,
require_transformers_version_greater,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
pipeline_class = IFPipeline
params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def get_dummy_components(self):
return self._get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def test_save_load_optional_components(self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU")
@require_accelerator
def test_save_load_float16(self):
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1e-1)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def test_save_load_local(self):
self._test_save_load_local()
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2,
)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_hf_hub_version_greater("0.26.5")
@require_transformers_version_greater("4.47.1")
def test_save_load_dduf(self):
super().test_save_load_dduf(atol=1e-2, rtol=1e-2)
@slow
@require_torch_accelerator
class IFPipelineSlowTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_if_text_to_image(self):
pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
pipe.unet.set_attn_processor(AttnAddedKVProcessor())
pipe.enable_model_cpu_offload(device=torch_device)
backend_reset_max_memory_allocated(torch_device)
backend_empty_cache(torch_device)
backend_reset_peak_memory_stats(torch_device)
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipe(
prompt="anime turtle",
num_inference_steps=2,
generator=generator,
output_type="np",
)
image = output.images[0]
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 12 * 10**9
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
)
assert_mean_pixel_difference(image, expected_image)
pipe.remove_all_hooks()
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
'rocksdict<=0.2.16',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
        # use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
        video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
            video: VideoDoc
            text: TextDoc
        mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
        mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
        elif (
            isinstance(value, (AbstractTensor, np.ndarray))
            or (torch_available and isinstance(value, torch.Tensor))
            or (tf_available and isinstance(value, tf.Tensor))
        ):
value = cls(tensor=value)
return super().validate(value)
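# A minimal sketch of the coercion in `validate` above: a plain string is
# wrapped as `VideoDoc(url=...)` and an array as `VideoDoc(tensor=...)`. The
# URL below is an illustrative placeholder, not a real video file.
if __name__ == '__main__':
    doc = VideoDoc.validate('https://example.com/mov_bbb.mp4')
    print(doc.url)
    doc = VideoDoc.validate(np.zeros((2, 4, 4, 3)))
    print(doc.tensor.shape)  # (2, 4, 4, 3)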
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
        from docarray.documents import VideoDoc
        # use it directly
        vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
        class MyVideo(VideoDoc):
            name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
        video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
        video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
            video: VideoDoc
            text: TextDoc
        mmdoc = MultiModalDoc(
            video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
            text=TextDoc(text='hello world, how are you doing?'),
)
        mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
        elif (
            isinstance(value, (AbstractTensor, np.ndarray))
            or (torch_available and isinstance(value, torch.Tensor))
            or (tf_available and isinstance(value, tf.Tensor))
        ):
value = cls(tensor=value)
return super().validate(value)
|
import re
from io import BytesIO
from pathlib import Path
from typing import Any, Type
import numpy as np
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
_re_in_image = r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*"
@pytest.mark.parametrize(
"blob,body",
[
(Blob.from_path(path_base / "examples/text-gray.png"), _re_in_image),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_different_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_numpy(
blob_loader: Type,
kw: dict[str, Any],
) -> None:
    gray_image = np.zeros(shape=(412, 1652, 1))  # deterministic blank grayscale image
with BytesIO() as buffer:
np.save(buffer, gray_image)
buffer.seek(0)
npy_bytes = buffer.getvalue()
blob = Blob.from_data(npy_bytes, mime_type="application/x-npy")
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
|
import re
from pathlib import Path
from typing import Any, Type
import pytest
from langchain_core.documents.base import Blob
from langchain_core.language_models import FakeMessagesListChatModel
from langchain_core.messages import ChatMessage
from langchain_community.document_loaders.parsers.images import (
LLMImageBlobParser,
RapidOCRBlobParser,
TesseractBlobParser,
)
path_base = Path(__file__).parent.parent.parent
building_image = Blob.from_path(path_base / "examples/building.jpg")
text_image = Blob.from_path(path_base / "examples/text.png")
page_image = Blob.from_path(path_base / "examples/page.png")
@pytest.mark.parametrize(
"blob,body",
[
(building_image, ""),
(text_image, r"(?ms).*MAKE.*TEXT.*STAND.*OUT.*FROM.*BACKGROUNDS.*"),
],
)
@pytest.mark.parametrize(
"blob_loader,kw",
[
(RapidOCRBlobParser, {}),
(TesseractBlobParser, {}),
(
LLMImageBlobParser,
{
"model": FakeMessagesListChatModel(
responses=[
ChatMessage(
id="ai1",
role="system",
content="A building. MAKE TEXT STAND OUT FROM BACKGROUNDS",
),
]
)
},
),
],
)
def test_image_parser_with_different_files(
blob_loader: Type,
kw: dict[str, Any],
blob: Blob,
body: str,
) -> None:
if blob_loader == LLMImageBlobParser and "building" in str(blob.path):
body = ".*building.*"
documents = list(blob_loader(**kw).lazy_parse(blob))
assert len(documents) == 1
assert re.compile(body).match(documents[0].page_content)
|
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
class ConditionBlock(Block):
class Input(BlockSchema):
value1: Any = SchemaField(
description="Enter the first value for comparison",
placeholder="For example: 10 or 'hello' or True",
)
operator: ComparisonOperator = SchemaField(
description="Choose the comparison operator",
placeholder="Select an operator",
)
value2: Any = SchemaField(
description="Enter the second value for comparison",
placeholder="For example: 20 or 'world' or False",
)
yes_value: Any = SchemaField(
description="(Optional) Value to output if the condition is true. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
no_value: Any = SchemaField(
description="(Optional) Value to output if the condition is false. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="715696a0-e1da-45c8-b209-c2fa9c3b0be6",
input_schema=ConditionBlock.Input,
output_schema=ConditionBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input={
"value1": 10,
"operator": ComparisonOperator.GREATER_THAN.value,
"value2": 5,
"yes_value": "Greater",
"no_value": "Not greater",
},
test_output=[
("result", True),
("yes_output", "Greater"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
        operator = input_data.operator
        value1 = input_data.value1
        if isinstance(value1, str):
            try:
                value1 = float(value1.strip())
            except ValueError:
                pass  # keep non-numeric strings (e.g. 'hello') comparable as-is
        value2 = input_data.value2
        if isinstance(value2, str):
            try:
                value2 = float(value2.strip())
            except ValueError:
                pass  # keep non-numeric strings (e.g. 'world') comparable as-is
        # Per the field descriptions above, both outputs fall back to value1.
        yes_value = input_data.yes_value if input_data.yes_value is not None else value1
        no_value = input_data.no_value if input_data.no_value is not None else value1
comparison_funcs = {
ComparisonOperator.EQUAL: lambda a, b: a == b,
ComparisonOperator.NOT_EQUAL: lambda a, b: a != b,
ComparisonOperator.GREATER_THAN: lambda a, b: a > b,
ComparisonOperator.LESS_THAN: lambda a, b: a < b,
ComparisonOperator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
}
result = comparison_funcs[operator](value1, value2)
yield "result", result
if result:
yield "yes_output", yes_value
else:
yield "no_output", no_value
|
from enum import Enum
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
class ComparisonOperator(Enum):
EQUAL = "=="
NOT_EQUAL = "!="
GREATER_THAN = ">"
LESS_THAN = "<"
GREATER_THAN_OR_EQUAL = ">="
LESS_THAN_OR_EQUAL = "<="
class ConditionBlock(Block):
class Input(BlockSchema):
value1: Any = SchemaField(
description="Enter the first value for comparison",
placeholder="For example: 10 or 'hello' or True",
)
operator: ComparisonOperator = SchemaField(
description="Choose the comparison operator",
placeholder="Select an operator",
)
value2: Any = SchemaField(
description="Enter the second value for comparison",
placeholder="For example: 20 or 'world' or False",
)
yes_value: Any = SchemaField(
description="(Optional) Value to output if the condition is true. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
no_value: Any = SchemaField(
description="(Optional) Value to output if the condition is false. If not provided, value1 will be used.",
placeholder="Leave empty to use value1, or enter a specific value",
default=None,
)
class Output(BlockSchema):
result: bool = SchemaField(
description="The result of the condition evaluation (True or False)"
)
yes_output: Any = SchemaField(
description="The output value if the condition is true"
)
no_output: Any = SchemaField(
description="The output value if the condition is false"
)
def __init__(self):
super().__init__(
id="715696a0-e1da-45c8-b209-c2fa9c3b0be6",
input_schema=ConditionBlock.Input,
output_schema=ConditionBlock.Output,
description="Handles conditional logic based on comparison operators",
categories={BlockCategory.LOGIC},
test_input={
"value1": 10,
"operator": ComparisonOperator.GREATER_THAN.value,
"value2": 5,
"yes_value": "Greater",
"no_value": "Not greater",
},
test_output=[
("result", True),
("yes_output", "Greater"),
],
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
value1 = input_data.value1
operator = input_data.operator
value2 = input_data.value2
yes_value = input_data.yes_value if input_data.yes_value is not None else value1
no_value = input_data.no_value if input_data.no_value is not None else value1
comparison_funcs = {
ComparisonOperator.EQUAL: lambda a, b: a == b,
ComparisonOperator.NOT_EQUAL: lambda a, b: a != b,
ComparisonOperator.GREATER_THAN: lambda a, b: a > b,
ComparisonOperator.LESS_THAN: lambda a, b: a < b,
ComparisonOperator.GREATER_THAN_OR_EQUAL: lambda a, b: a >= b,
ComparisonOperator.LESS_THAN_OR_EQUAL: lambda a, b: a <= b,
}
try:
result = comparison_funcs[operator](value1, value2)
yield "result", result
if result:
yield "yes_output", yes_value
else:
yield "no_output", no_value
except Exception:
yield "result", None
yield "yes_output", None
yield "no_output", None
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import spacy
from jina import Document, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except ImportError:
from ...spacy_text_encoder import SpacyTextEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_spacy_text_encoder():
# Input
docs = DocumentArray([Document(text='Han likes eating pizza'), Document(text='Han likes pizza'),
Document(text='Jina rocks')])
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={})
    # Compare with output
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96, )
def test_spacy_text_encoder_traversal_paths():
# Input
docs = DocumentArray([Document(chunks=[Document(text='Han likes eating pizza'), Document(text='Han likes pizza')]),
Document(chunks=[Document(text='Jina rocks')])])
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': ['c']})
    # Compare with output
assert len(docs) == 2
assert len(docs[0].chunks) == 2
for chunk in docs[0].chunks:
assert chunk.embedding.shape == (96, )
assert len(docs[1].chunks) == 1
for chunk in docs[1].chunks:
assert chunk.embedding.shape == (96, )
def test_unsupported_lang(tmpdir):
dummy1 = spacy.blank('xx')
dummy1_dir_path = os.path.join(tmpdir, 'xx1')
dummy1.to_disk(dummy1_dir_path)
dummy2 = spacy.blank('xx')
dummy2_dir_path = os.path.join(tmpdir, 'xx2')
dummy2.to_disk(dummy2_dir_path)
    # An unknown language / model path should raise an IOError
with pytest.raises(IOError):
SpacyTextEncoder('abcd')
    # A language pipeline without a DependencyParser should throw an error
    # when trying to use the default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
    # And it should be fine once the 'parser' pipeline is added
dummy1.add_pipe('parser')
dummy1.to_disk(dummy1_dir_path)
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
    # A language pipeline without a SentenceRecognizer should throw an error
    # when trying to use the non-default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
    # And it should be fine once the 'tok2vec' pipeline is added
dummy2.add_pipe('tok2vec')
dummy2.to_disk(dummy2_dir_path)
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import pytest
import spacy
from jina import Document, DocumentArray
try:
from spacy_text_encoder import SpacyTextEncoder
except ImportError:
from jinahub.encoder.spacy_text_encoder import SpacyTextEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_spacy_text_encoder():
# Input
docs = DocumentArray([Document(text='Han likes eating pizza'), Document(text='Han likes pizza'),
Document(text='Jina rocks')])
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={})
    # Compare with output
assert len(docs) == 3
for doc in docs:
assert doc.embedding.shape == (96, )
def test_spacy_text_encoder_traversal_paths():
# Input
docs = DocumentArray([Document(chunks=[Document(text='Han likes eating pizza'), Document(text='Han likes pizza')]),
Document(chunks=[Document(text='Jina rocks')])])
# Encoder embedding
encoder = SpacyTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': ['c']})
    # Compare with output
assert len(docs) == 2
assert len(docs[0].chunks) == 2
for chunk in docs[0].chunks:
assert chunk.embedding.shape == (96, )
assert len(docs[1].chunks) == 1
for chunk in docs[1].chunks:
assert chunk.embedding.shape == (96, )
def test_unsupported_lang(tmpdir):
dummy1 = spacy.blank('xx')
dummy1_dir_path = os.path.join(tmpdir, 'xx1')
dummy1.to_disk(dummy1_dir_path)
dummy2 = spacy.blank('xx')
dummy2_dir_path = os.path.join(tmpdir, 'xx2')
dummy2.to_disk(dummy2_dir_path)
    # An unknown language / model path should raise an IOError
with pytest.raises(IOError):
SpacyTextEncoder('abcd')
    # A language pipeline without a DependencyParser should throw an error
    # when trying to use the default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
    # And it should be fine once the 'parser' pipeline is added
dummy1.add_pipe('parser')
dummy1.to_disk(dummy1_dir_path)
SpacyTextEncoder(dummy1_dir_path, use_default_encoder=True)
    # A language pipeline without a SentenceRecognizer should throw an error
    # when trying to use the non-default encoder
with pytest.raises(ValueError):
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
    # And it should be fine once the 'tok2vec' pipeline is added
dummy2.add_pipe('tok2vec')
dummy2.to_disk(dummy2_dir_path)
SpacyTextEncoder(dummy2_dir_path, use_default_encoder=False)
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls,
value: list[BaseMemory],
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}",
stacklevel=5,
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
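# A minimal usage sketch, assuming `ConversationBufferMemory` from
# `langchain.memory` (a BaseChatMemory subclass). Distinct `memory_key`s keep
# the variable sets disjoint, which `check_repeated_memory_variable` requires.
if __name__ == "__main__":
    from langchain.memory import ConversationBufferMemory
    combined = CombinedMemory(
        memories=[
            ConversationBufferMemory(memory_key="chat_history", input_key="input"),
            ConversationBufferMemory(memory_key="scratchpad", input_key="input"),
        ]
    )
    combined.save_context({"input": "hi"}, {"output": "hello"})
    print(combined.load_memory_variables({"input": "hi"}))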
|
import warnings
from typing import Any
from langchain_core.memory import BaseMemory
from pydantic import field_validator
from langchain.memory.chat_memory import BaseChatMemory
class CombinedMemory(BaseMemory):
"""Combining multiple memories' data together."""
memories: list[BaseMemory]
"""For tracking all the memories that should be accessed."""
@field_validator("memories")
@classmethod
def check_repeated_memory_variable(
cls,
value: list[BaseMemory],
) -> list[BaseMemory]:
all_variables: set[str] = set()
for val in value:
overlap = all_variables.intersection(val.memory_variables)
if overlap:
msg = (
f"The same variables {overlap} are found in multiple"
"memory object, which is not allowed by CombinedMemory."
)
raise ValueError(msg)
all_variables |= set(val.memory_variables)
return value
@field_validator("memories")
@classmethod
def check_input_key(cls, value: list[BaseMemory]) -> list[BaseMemory]:
"""Check that if memories are of type BaseChatMemory that input keys exist."""
for val in value:
if isinstance(val, BaseChatMemory) and val.input_key is None:
warnings.warn(
"When using CombinedMemory, "
"input keys should be so the input is known. "
f" Was not set on {val}",
stacklevel=2,
)
return value
@property
def memory_variables(self) -> list[str]:
"""All the memory variables that this instance provides."""
"""Collected from the all the linked memories."""
memory_variables = []
for memory in self.memories:
memory_variables.extend(memory.memory_variables)
return memory_variables
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Load all vars from sub-memories."""
memory_data: dict[str, Any] = {}
# Collect vars from all sub-memories
for memory in self.memories:
data = memory.load_memory_variables(inputs)
for key, value in data.items():
if key in memory_data:
msg = f"The variable {key} is repeated in the CombinedMemory."
raise ValueError(msg)
memory_data[key] = value
return memory_data
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this session for every memory."""
# Save context for all sub-memories
for memory in self.memories:
memory.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear context from this session for every memory."""
for memory in self.memories:
memory.clear()
|
from ...models.controlnets.multicontrolnet import MultiControlNetModel
from ...utils import deprecate, logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `MultiControlNetModel` from `diffusers.pipelines.controlnet.multicontrolnet` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.multicontrolnet import MultiControlNetModel`, instead."
deprecate("diffusers.pipelines.controlnet.multicontrolnet.MultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
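# Constructing the shim above goes through `deprecate`, which emits a
# FutureWarning while the installed diffusers version is below the removal
# version ("0.34" above) and raises once it is reached. A minimal sketch of
# observing that warning; the empty controlnet list is illustrative only.
if __name__ == "__main__":
    import warnings
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        MultiControlNetModel([])
        print([str(w.message) for w in caught])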
|
from ...models.controlnets.multicontrolnet import MultiControlNetModel
from ...utils import deprecate, logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(MultiControlNetModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `MultiControlNetModel` from `diffusers.pipelines.controlnet.multicontrolnet` is deprecated and this will be removed in a future version. Please use `from diffusers.models.controlnets.multicontrolnet import MultiControlNetModel`, instead."
deprecate("MultiControlNetModel", "0.34", deprecation_message)
super().__init__(*args, **kwargs)
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (6 samples per GPU)
auto_scale_lr = dict(base_batch_size=48)
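# A typical launch sketch for this config, assuming a standard MMDetection
# checkout with `tools/dist_train.sh`; 8 GPUs x samples_per_gpu=6 reproduces
# the base_batch_size of 48 declared above:
#   bash tools/dist_train.sh <path/to/this_config>.py 8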
|
_base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
# model settings
model = dict(
type='CornerNet',
backbone=dict(
type='HourglassNet',
downsample_times=5,
num_stacks=2,
stage_channels=[256, 256, 384, 384, 384, 512],
stage_blocks=[2, 2, 2, 2, 2, 4],
norm_cfg=dict(type='BN', requires_grad=True)),
neck=None,
bbox_head=dict(
type='CornerHead',
num_classes=80,
in_channels=256,
num_feat_levels=2,
corner_emb_channels=1,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_embedding=dict(
type='AssociativeEmbeddingLoss',
pull_weight=0.10,
push_weight=0.10),
loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
# training and testing settings
train_cfg=None,
test_cfg=dict(
corner_topk=100,
local_maximum_kernel=3,
distance_threshold=0.5,
score_thr=0.05,
max_per_img=100,
nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
**img_norm_cfg),
dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=True,
transforms=[
dict(type='Resize'),
dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
test_mode=True,
test_pad_mode=['logical_or', 127],
**img_norm_cfg),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg', 'border')),
])
]
data = dict(
samples_per_gpu=6,
workers_per_gpu=3,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)
|
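# A minimal sketch (not part of the configs above) of loading an MMDetection
# config such as the CornerNet ones and inspecting its resolved values. The
# file path is a placeholder assumption; older releases expose Config via
# mmcv, newer ones via mmengine.
from mmcv import Config

cfg = Config.fromfile('configs/cornernet/cornernet_hourglass104_coco.py')  # hypothetical path
print(cfg.model.bbox_head.num_classes)  # 80
print(cfg.optimizer)                    # {'type': 'Adam', 'lr': 0.0005}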
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.export_lib import ExportArchive
|
# Copyright (c) Meta Platforms, Inc. and affiliates
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.meta import LlamaLLM
def test_llm_class():
names_of_base_classes = [b.__name__ for b in LlamaLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.meta import LlamaLLM
def test_llm_class():
names_of_base_classes = [b.__name__ for b in LlamaLLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import Demultiplexer, Filter, IterDataPipe, IterKeyZipper, LineReader, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
getitem,
hint_sharding,
hint_shuffling,
INFINITE_BUFFER_SIZE,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
NAME = "food101"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Food101(Dataset):
"""Food 101 dataset
homepage="https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101",
"""
def __init__(self, root: Union[str, Path], *, split: str = "train", skip_integrity_check: bool = False) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
url="http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz",
sha256="d97d15e438b7f4498f96086a4f7e2fa42a32f2712e87d3295441b2b6314053a4",
preprocess="decompress",
)
]
def _classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
path = Path(data[0])
if path.parents[1].name == "images":
return 0
elif path.parents[0].name == "meta":
return 1
else:
return None
def _prepare_sample(self, data: Tuple[str, Tuple[str, BinaryIO]]) -> Dict[str, Any]:
id, (path, buffer) = data
return dict(
label=Label.from_category(id.split("/", 1)[0], categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _image_key(self, data: Tuple[str, Any]) -> str:
path = Path(data[0])
return path.relative_to(path.parents[1]).with_suffix("").as_posix()
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
archive_dp = resource_dps[0]
images_dp, split_dp = Demultiplexer(
archive_dp, 2, self._classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
)
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = hint_sharding(split_dp)
split_dp = hint_shuffling(split_dp)
dp = IterKeyZipper(
split_dp,
images_dp,
key_fn=getitem(),
ref_key_fn=self._image_key,
buffer_size=INFINITE_BUFFER_SIZE,
)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = LineReader(dp, decode=True, return_path=False)
return list(dp)
def __len__(self) -> int:
return 75_750 if self._split == "train" else 25_250
|
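# A self-contained sketch of the `_image_key` logic in the Food101 dataset
# above: an archive path such as "food-101/images/apple_pie/12345.jpg" is
# reduced to the key "apple_pie/12345", matching the ids in train.txt/test.txt.
from pathlib import Path

path = Path("food-101/images/apple_pie/12345.jpg")
key = path.relative_to(path.parents[1]).with_suffix("").as_posix()
assert key == "apple_pie/12345"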
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
# MMEngine supports the following two ways, users can choose
# according to convenience
# train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
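# A short sketch (plain NumPy, shapes arbitrary) of what the caffe-style
# preprocessing in both configs above amounts to: BGR channel order is kept
# (bgr_to_rgb=False / to_rgb=False) and only the per-channel mean is
# subtracted, since std is [1.0, 1.0, 1.0].
import numpy as np

img_bgr = np.random.randint(0, 256, (480, 640, 3)).astype(np.float32)
mean = np.array([103.530, 116.280, 123.675], dtype=np.float32)
normalized = img_bgr - mean  # division by std=[1, 1, 1] is a no-op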
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# testing settings
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron/resnet50_caffe')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output', # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
|
from typing import TYPE_CHECKING
import tensorflow as tf
if TYPE_CHECKING: # pragma: no cover
from tensorflow import Tensor
import numpy
def _get_tf_device(device: str):
return tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
def cosine(
x_mat: 'Tensor', y_mat: 'Tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: tensorflow array with ndim=2
    :param y_mat: tensorflow array with ndim=2
    :param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
with _get_tf_device(device):
normalize_a = tf.nn.l2_normalize(x_mat, 1, epsilon=eps)
normalize_b = tf.nn.l2_normalize(y_mat, 1, epsilon=eps)
distance = 1 - tf.matmul(normalize_a, normalize_b, transpose_b=True)
return distance.numpy()
def sqeuclidean(
x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
    with _get_tf_device(device):
return tf.reduce_sum(
(tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2
).numpy()
def euclidean(x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
    with _get_tf_device(device):
return tf.sqrt(
tf.reduce_sum((tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2)
).numpy()
|
from typing import TYPE_CHECKING
import tensorflow as tf
if TYPE_CHECKING:
from tensorflow import Tensor
import numpy
def _get_tf_device(device: str):
return tf.device('/GPU:0') if device == 'cuda' else tf.device('/CPU:0')
def cosine(
x_mat: 'Tensor', y_mat: 'Tensor', eps: float = 1e-7, device: str = 'cpu'
) -> 'numpy.ndarray':
"""Cosine distance between each row in x_mat and each row in y_mat.
    :param x_mat: tensorflow array with ndim=2
    :param y_mat: tensorflow array with ndim=2
    :param eps: a small jitter to avoid divide by zero
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
with _get_tf_device(device):
normalize_a = tf.nn.l2_normalize(x_mat, 1, epsilon=eps)
normalize_b = tf.nn.l2_normalize(y_mat, 1, epsilon=eps)
distance = 1 - tf.matmul(normalize_a, normalize_b, transpose_b=True)
return distance.numpy()
def sqeuclidean(
x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu'
) -> 'numpy.ndarray':
"""Squared euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
    with _get_tf_device(device):
return tf.reduce_sum(
(tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2
).numpy()
def euclidean(x_mat: 'Tensor', y_mat: 'Tensor', device: str = 'cpu') -> 'numpy.ndarray':
"""Euclidean distance between each row in x_mat and each row in y_mat.
:param x_mat: tensorflow array with ndim=2
:param y_mat: tensorflow array with ndim=2
:param device: the computational device for `embed_model`, can be either `cpu` or `cuda`.
:return: np.ndarray with ndim=2
"""
    with _get_tf_device(device):
return tf.sqrt(
tf.reduce_sum((tf.expand_dims(x_mat, 1) - tf.expand_dims(y_mat, 0)) ** 2, 2)
).numpy()
|
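# A short usage sketch for the distance helpers above, assuming they are in
# scope; data is random and the CPU device is used.
import numpy as np

x = np.random.rand(4, 8).astype('float32')
y = np.random.rand(5, 8).astype('float32')
d_cos = cosine(x, y)      # shape (4, 5)
d_sq = sqeuclidean(x, y)  # shape (4, 5)
d_eu = euclidean(x, y)    # shape (4, 5)
assert d_eu.shape == (4, 5)
assert np.allclose(d_eu ** 2, d_sq, atol=1e-4)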
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310
@pytest.fixture(
name="client",
params=[
"tutorial003_05",
pytest.param("tutorial003_05_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.response_model.{request.param}")
client = TestClient(mod.app)
return client
def test_get_portal(client: TestClient):
response = client.get("/portal")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Here's your interdimensional portal."}
def test_get_redirect(client: TestClient):
response = client.get("/portal", params={"teleport": True}, follow_redirects=False)
assert response.status_code == 307, response.text
assert response.headers["location"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/portal": {
"get": {
"summary": "Get Portal",
"operationId": "get_portal_portal_get",
"parameters": [
{
"required": False,
"schema": {
"title": "Teleport",
"type": "boolean",
"default": False,
},
"name": "teleport",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
|
from fastapi.testclient import TestClient
from docs_src.response_model.tutorial003_05 import app
client = TestClient(app)
def test_get_portal():
response = client.get("/portal")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Here's your interdimensional portal."}
def test_get_redirect():
response = client.get("/portal", params={"teleport": True}, follow_redirects=False)
assert response.status_code == 307, response.text
assert response.headers["location"] == "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/portal": {
"get": {
"summary": "Get Portal",
"operationId": "get_portal_portal_get",
"parameters": [
{
"required": False,
"schema": {
"title": "Teleport",
"type": "boolean",
"default": False,
},
"name": "teleport",
"in": "query",
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
|
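# A hypothetical reconstruction (not the actual docs_src module) of an app
# that would satisfy the tests above: /portal redirects with a 307 when
# teleport=true and otherwise returns the portal message.
from fastapi import FastAPI
from fastapi.responses import RedirectResponse

app = FastAPI()

@app.get("/portal")
async def get_portal(teleport: bool = False):
    if teleport:
        return RedirectResponse(url="https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    return {"message": "Here's your interdimensional portal."}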
from dataclasses import dataclass
from typing import List, Union
import numpy as np
import PIL.Image
import torch
from diffusers.utils import BaseOutput, get_logger
logger = get_logger(__name__)
@dataclass
class CosmosPipelineOutput(BaseOutput):
r"""
Output class for Cosmos any-to-world/video pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape
`(batch_size, num_frames, channels, height, width)`.
"""
frames: torch.Tensor
@dataclass
class CosmosImagePipelineOutput(BaseOutput):
"""
Output class for Cosmos any-to-image pipelines.
Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`):
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy arrays represent the denoised images of the diffusion pipeline.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
|
from dataclasses import dataclass
import torch
from diffusers.utils import BaseOutput
@dataclass
class CosmosPipelineOutput(BaseOutput):
r"""
Output class for Cosmos pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing
denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape
`(batch_size, num_frames, channels, height, width)`.
"""
frames: torch.Tensor
|
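# A tiny usage sketch for the output classes above: BaseOutput subclasses are
# dataclass-style containers that also allow dict-style access. Shapes are
# arbitrary example values.
import torch

out = CosmosPipelineOutput(frames=torch.zeros(1, 16, 3, 64, 64))
print(out.frames.shape)  # (batch, num_frames, channels, height, width)
assert out["frames"] is out.frames  # BaseOutput supports key access too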
from __future__ import annotations
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
from .PhraseTokenizer import PhraseTokenizer
from .WhitespaceTokenizer import WhitespaceTokenizer
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
__all__ = ["WordTokenizer", "WhitespaceTokenizer", "PhraseTokenizer", "ENGLISH_STOP_WORDS"]
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then
we use PCA to find e.g. 128 principal components of our vector space. This allows us to retain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions,
with no further changes needed.
"""
import logging
import random
import numpy as np
import torch
from sklearn.decomposition import PCA
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, models
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Model for which we apply dimensionality reduction
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
# New size for the embeddings
new_dimension = 128
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
stsb_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
name="sts-test",
)
logging.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")
nli_sentences = train_dataset["sentence1"] + train_dataset["sentence2"]
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will produce directly embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logging.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# You can store the adapted model on disk like this:
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
model.save(f"{model_name}-128dim")
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer(f"{model_name}-128dim")
# Or you can push the model to the Hugging Face Hub
# model.push_to_hub(f'{model_name}-128dim')
|
"""
The pre-trained models produce embeddings of size 512 - 1024. However, when storing a large
number of embeddings, this requires quite a lot of memory / storage.
In this example, we reduce the dimensionality of the embeddings to e.g. 128 dimensions. This significantly
reduces the required memory / storage while maintaining nearly the same performance.
For dimensionality reduction, we compute embeddings for a large set of (representative) sentences. Then
we use PCA to find e.g. 128 principal components of our vector space. This allows us to retain
as much information as possible with only 128 dimensions.
PCA gives us a matrix that down-projects vectors to 128 dimensions. We use this matrix
and extend our original SentenceTransformer model with this linear down-projection. Hence,
the new SentenceTransformer model will directly produce embeddings with 128 dimensions,
with no further changes needed.
"""
from datasets import load_dataset
from sklearn.decomposition import PCA
from sentence_transformers import SentenceTransformer, models
import logging
import random
import numpy as np
import torch
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Model for which we apply dimensionality reduction
model_name = "all-MiniLM-L6-v2"
model = SentenceTransformer(model_name)
# New size for the embeddings
new_dimension = 128
# We measure the performance of the original model
# and later we will measure the performance with the reduced dimension size
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
stsb_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
name="sts-test",
)
logging.info("Original model performance:")
stsb_evaluator(model)
######## Reduce the embedding dimensions ########
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-score", split="train")
nli_sentences = train_dataset["sentence1"] + train_dataset["sentence2"]
random.shuffle(nli_sentences)
# To determine the PCA matrix, we need some example sentence embeddings.
# Here, we compute the embeddings for 20k random sentences from the AllNLI dataset
pca_train_sentences = nli_sentences[0:20000]
train_embeddings = model.encode(pca_train_sentences, convert_to_numpy=True)
# Compute PCA on the train embeddings matrix
pca = PCA(n_components=new_dimension)
pca.fit(train_embeddings)
pca_comp = np.asarray(pca.components_)
# We add a dense layer to the model, so that it will produce directly embeddings with the new size
dense = models.Dense(
in_features=model.get_sentence_embedding_dimension(),
out_features=new_dimension,
bias=False,
activation_function=torch.nn.Identity(),
)
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca_comp))
model.add_module("dense", dense)
# Evaluate the model with the reduced embedding size
logging.info("Model with {} dimensions:".format(new_dimension))
stsb_evaluator(model)
# You can store the adapted model on disk like this:
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
model.save(f"{model_name}-128dim")
# You can then load the adapted model that produces 128 dimensional embeddings like this:
# model = SentenceTransformer(f"{model_name}-128dim")
# Or you can push the model to the Hugging Face Hub
# model.push_to_hub(f'{model_name}-128dim')
|
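# A quick sanity-check sketch for the script above, assuming `model` is the
# extended SentenceTransformer still in scope: the appended Dense module makes
# encode() emit 128-dimensional vectors. The example sentence is arbitrary.
emb = model.encode(["A quick sanity-check sentence."])
assert emb.shape == (1, 128)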
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
"""
CSRReconstructionLoss implements the reconstruction loss component for Contrastive Sparse Representation (CSR) models.
This loss ensures that the sparse encoding can accurately reconstruct the original model embeddings through
three components:
1. A primary reconstruction loss (L_k) that measures the error between the original embedding and its
reconstruction using the top-k sparse components.
2. A secondary reconstruction loss (L_4k) that measures the error using the top-4k sparse components.
3. An auxiliary loss (L_aux) that helps to learn residual information.
Args:
model: SparseEncoder model with autoencoder components
beta: Weight for the auxiliary loss component (L_aux)
References:
- For more details, see the paper "Beyond Matryoshka: Revisiting Sparse Coding for Adaptive Representation"
https://arxiv.org/abs/2503.01776
Requirements:
1. The model must be configured to output the necessary reconstruction components
2. Used with SparseEncoder models that implement compositional sparse autoencoding
Relations:
- Used as a component within :class:`CSRLoss` combined with a contrastive loss
Example:
::
This loss is typically used within the :class:`CSRLoss` class, which combines it with other loss components.
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(reconstruction: torch.Tensor, original_input: torch.Tensor) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class CSRReconstructionLoss(nn.Module):
"""
CSRReconstruction Loss module for Sparse AutoEncoder.
This module computes the reconstruction loss according to the formula:
L_recon = L(k) + L(4k)/8 + β*L_aux
where:
- L(k) = ||f(x) - f(dx)_k||₂²
- L(4k) = ||f(x) - f(dx)_4k||₂²
- L_aux = ||e - ê||₂², e = f(x) - f(dx), ê = W_dec*z
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the CSRReconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the CSRReconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {"beta": self.beta}
|
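# A small numeric sketch of `normalized_mean_squared_error` defined above:
# a perfect reconstruction scores 0, and scaling the reconstruction by 0.9
# yields (0.1)^2 = 0.01 regardless of the input's magnitude.
import torch

x = torch.randn(8, 32)
print(normalized_mean_squared_error(x, x))        # tensor(0.)
print(normalized_mean_squared_error(0.9 * x, x))  # ~tensor(0.0100)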
import numpy as np
import keras
from keras import Model
from keras import initializers
from keras import layers
from keras import losses
from keras import metrics
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=(input_dim, self.units),
initializer=initializers.GlorotNormal(),
name="kernel",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=initializers.Zeros(),
name="bias",
trainable=True,
)
def call(self, inputs):
# Use Keras ops to create backend-agnostic layers/metrics/etc.
return ops.matmul(inputs, self.w) + self.b
class MyDropout(layers.Layer):
def __init__(self, rate, name=None):
super().__init__(name=name)
self.rate = rate
# Use seed_generator for managing RNG state.
# It is a state element and its seed variable is
# tracked as part of `layer.variables`.
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
# Use `keras.random` for random ops.
return keras.random.dropout(inputs, self.rate, seed=self.seed_generator)
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
self.dp = MyDropout(0.5)
def call(self, x):
x1 = self.dense1(x)
x2 = self.dense2(x)
# Why not use some ops here as well
x = ops.concatenate([x1, x2], axis=-1)
x = self.dp(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 5
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)
model.summary()
print("History:")
print(history.history)
|
import numpy as np
import keras
from keras import Model
from keras import initializers
from keras import layers
from keras import losses
from keras import metrics
from keras import ops
from keras import optimizers
class MyDense(layers.Layer):
def __init__(self, units, name=None):
super().__init__(name=name)
self.units = units
def build(self, input_shape):
input_dim = input_shape[-1]
self.w = self.add_weight(
shape=(input_dim, self.units),
initializer=initializers.GlorotNormal(),
name="kernel",
trainable=True,
)
self.b = self.add_weight(
shape=(self.units,),
initializer=initializers.Zeros(),
name="bias",
trainable=True,
)
def call(self, inputs):
# Use Keras ops to create backend-agnostic layers/metrics/etc.
return ops.matmul(inputs, self.w) + self.b
class MyDropout(layers.Layer):
def __init__(self, rate, name=None):
super().__init__(name=name)
self.rate = rate
# Use seed_generator for managing RNG state.
# It is a state element and its seed variable is
# tracked as part of `layer.variables`.
self.seed_generator = keras.random.SeedGenerator(1337)
def call(self, inputs):
# Use `keras.random` for random ops.
return keras.random.dropout(
inputs, self.rate, seed=self.seed_generator
)
class MyModel(Model):
def __init__(self, hidden_dim, output_dim):
super().__init__()
self.dense1 = MyDense(hidden_dim)
self.dense2 = MyDense(hidden_dim)
self.dense3 = MyDense(output_dim)
self.dp = MyDropout(0.5)
def call(self, x):
x1 = self.dense1(x)
x2 = self.dense2(x)
# Why not use some ops here as well
x = ops.concatenate([x1, x2], axis=-1)
x = self.dp(x)
return self.dense3(x)
model = MyModel(hidden_dim=256, output_dim=16)
x = np.random.random((50000, 128))
y = np.random.random((50000, 16))
batch_size = 32
epochs = 5
model.compile(
optimizer=optimizers.SGD(learning_rate=0.001),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size, epochs=epochs)
model.summary()
print("History:")
print(history.history)
|
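# A minimal sketch exercising the custom layer defined above (assuming
# MyDense from the script is in scope) outside of fit(); build() runs
# automatically on the first call. Shapes are arbitrary.
import numpy as np

layer = MyDense(4)
out = layer(np.zeros((2, 8), dtype="float32"))
print(out.shape)  # (2, 4)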
import numpy as np
import torch
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import (
AnyTensor,
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
    class MySuperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(Document):
tensor: NdArray
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: AnyTensor
generic_torch_tensor: AnyTensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
import numpy as np
import torch
from docarray import Document, DocumentArray, Image, Text
from docarray.typing import (
AnyUrl,
Embedding,
ImageUrl,
Mesh3DUrl,
NdArray,
PointCloud3DUrl,
Tensor,
TextUrl,
TorchEmbedding,
TorchTensor,
)
from docarray.typing.tensor import NdArrayEmbedding
def test_multi_modal_doc_proto():
class MyMultiModalDoc(Document):
image: Image
text: Text
    class MySuperDoc(Document):
doc: MyMultiModalDoc
description: str
doc = MyMultiModalDoc(
image=Image(tensor=np.zeros((3, 224, 224))), text=Text(text='hello')
)
MyMultiModalDoc.from_protobuf(doc.to_protobuf())
def test_all_types():
class NestedDoc(Document):
tensor: NdArray
class MyDoc(Document):
img_url: ImageUrl
txt_url: TextUrl
mesh_url: Mesh3DUrl
point_cloud_url: PointCloud3DUrl
any_url: AnyUrl
torch_tensor: TorchTensor
torch_tensor_param: TorchTensor[224, 224, 3]
np_array: NdArray
np_array_param: NdArray[224, 224, 3]
generic_nd_array: Tensor
generic_torch_tensor: Tensor
embedding: Embedding
torch_embedding: TorchEmbedding[128]
np_embedding: NdArrayEmbedding[128]
nested_docs: DocumentArray[NestedDoc]
doc = MyDoc(
img_url='test.png',
txt_url='test.txt',
mesh_url='test.obj',
point_cloud_url='test.obj',
any_url='www.jina.ai',
torch_tensor=torch.zeros((3, 224, 224)),
torch_tensor_param=torch.zeros((3, 224, 224)),
np_array=np.zeros((3, 224, 224)),
np_array_param=np.zeros((3, 224, 224)),
generic_nd_array=np.zeros((3, 224, 224)),
generic_torch_tensor=torch.zeros((3, 224, 224)),
embedding=np.zeros((3, 224, 224)),
torch_embedding=torch.zeros((128,)),
np_embedding=np.zeros((128,)),
nested_docs=DocumentArray[NestedDoc]([NestedDoc(tensor=np.zeros((128,)))]),
)
doc = MyDoc.from_protobuf(doc.to_protobuf())
assert doc.img_url == 'test.png'
assert doc.txt_url == 'test.txt'
assert doc.mesh_url == 'test.obj'
assert doc.point_cloud_url == 'test.obj'
assert doc.any_url == 'www.jina.ai'
assert (doc.torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.torch_tensor, torch.Tensor)
assert (doc.torch_tensor_param == torch.zeros((224, 224, 3))).all()
assert isinstance(doc.torch_tensor_param, torch.Tensor)
assert (doc.np_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.np_array, np.ndarray)
assert doc.np_array.flags.writeable
assert (doc.np_array_param == np.zeros((224, 224, 3))).all()
assert isinstance(doc.np_array_param, np.ndarray)
assert (doc.generic_nd_array == np.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_nd_array, np.ndarray)
assert (doc.generic_torch_tensor == torch.zeros((3, 224, 224))).all()
assert isinstance(doc.generic_torch_tensor, torch.Tensor)
assert (doc.torch_embedding == torch.zeros((128,))).all()
assert isinstance(doc.torch_embedding, torch.Tensor)
assert (doc.np_embedding == np.zeros((128,))).all()
assert isinstance(doc.np_embedding, np.ndarray)
assert (doc.embedding == np.zeros((3, 224, 224))).all()
|
__version__ = '0.13.31'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.30'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray import Document
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import (
DocumentArraySourceType,
)
def needs_id2offset_rebuild(func) -> Callable:
# self._id2offset needs to be rebuilt after every insert or delete
# this flag allows to do it lazily and cache the result
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._needs_id2offset_rebuild = True
return func(self, *args, **kwargs)
return wrapper
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
@property
def _id2offset(self) -> Dict[str, int]:
"""Return the `_id_to_index` map
:return: a Python dict.
"""
if self._needs_id2offset_rebuild:
self._rebuild_id2offset()
return self._id_to_index
def _rebuild_id2offset(self) -> None:
"""Update the id_to_index map by enumerating all Documents in self._data.
        Very costly! Only use this function when self._data has changed dramatically.
"""
self._id_to_index = {
d.id: i for i, d in enumerate(self._data)
} # type: Dict[str, int]
self._needs_id2offset_rebuild = False
@needs_id2offset_rebuild
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
from docarray.array.memory import DocumentArrayInMemory
super()._init_storage(_docs, copy=copy, *args, **kwargs)
self._data = []
self._id_to_index = {}
if _docs is None:
return
elif isinstance(
_docs,
Iterable,
):
if copy:
self._data = [Document(d, copy=True) for d in _docs]
elif isinstance(_docs, DocumentArrayInMemory):
self._data = _docs._data
self._id_to_index = _docs._id2offset
self._needs_id2offset_rebuild = _docs._needs_id2offset_rebuild
else:
self.extend(_docs)
else:
if isinstance(_docs, Document):
if copy:
self.append(Document(_docs, copy=True))
else:
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
return config_joined
|
import functools
from typing import (
Optional,
TYPE_CHECKING,
Iterable,
Callable,
Dict,
)
from docarray.array.storage.base.backend import BaseBackendMixin
from docarray import Document
if TYPE_CHECKING:
from docarray.typing import (
DocumentArraySourceType,
)
def needs_id2offset_rebuild(func) -> Callable:
# self._id2offset needs to be rebuilt after every insert or delete
# this flag allows to do it lazily and cache the result
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self._needs_id2offset_rebuild = True
return func(self, *args, **kwargs)
return wrapper
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
@property
def _id2offset(self) -> Dict[str, int]:
"""Return the `_id_to_index` map
:return: a Python dict.
"""
if self._needs_id2offset_rebuild:
self._rebuild_id2offset()
return self._id_to_index
def _rebuild_id2offset(self) -> None:
"""Update the id_to_index map by enumerating all Documents in self._data.
        Very costly! Only use this function when self._data has changed dramatically.
"""
self._id_to_index = {
d.id: i for i, d in enumerate(self._data)
} # type: Dict[str, int]
self._needs_id2offset_rebuild = False
@needs_id2offset_rebuild
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
copy: bool = False,
*args,
**kwargs
):
from docarray.array.memory import DocumentArrayInMemory
super()._init_storage(_docs, copy=copy, *args, **kwargs)
self._data = []
self._id_to_index = {}
if _docs is None:
return
elif isinstance(
_docs,
Iterable,
):
if copy:
self._data = [Document(d, copy=True) for d in _docs]
elif isinstance(_docs, DocumentArrayInMemory):
self._data = _docs._data
self._id_to_index = _docs._id2offset
self._needs_id2offset_rebuild = _docs._needs_id2offset_rebuild
else:
self.extend(_docs)
else:
if isinstance(_docs, Document):
if copy:
self.append(Document(_docs, copy=True))
else:
self.append(_docs)
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
return config_joined
|
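# A stripped-down sketch of the lazy-rebuild pattern used by the backend
# above: mutating operations only set a dirty flag, and the expensive
# id-to-offset map is rebuilt on the next read. All names here are illustrative.
import functools

def marks_dirty(func):
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        self._dirty = True
        return func(self, *args, **kwargs)
    return wrapper

class Store:
    def __init__(self):
        self._items, self._index, self._dirty = [], {}, False

    @marks_dirty
    def append(self, item):
        self._items.append(item)

    @property
    def index(self):
        if self._dirty:  # rebuild lazily, only when actually read
            self._index = {v: i for i, v in enumerate(self._items)}
            self._dirty = False
        return self._index

s = Store()
s.append('a')
s.append('b')
print(s.index)  # {'a': 0, 'b': 1}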
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
(1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
_base_.train_dataloader.dataset.pipeline = train_pipeline
|
_base_ = './faster-rcnn_r50-caffe_c4-1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError if "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
        Call the ``encode`` method of the model.
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
"""
return model.encode(sentences, **kwargs)
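# --- Illustration (not part of the original module) ---
# A minimal custom evaluator sketch: subclass SentenceEvaluator, implement
# __call__, and set ``primary_metric`` when returning a dict of metrics.
# The class name, metric name and scoring logic below are hypothetical.
class DummyCosineEvaluator(SentenceEvaluator):
    def __init__(self, sentences1: list[str], sentences2: list[str], name: str = ""):
        super().__init__()
        self.sentences1 = sentences1
        self.sentences2 = sentences2
        self.name = name
        self.primary_metric = "mean_cosine"

    def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
    ) -> dict[str, float]:
        import torch

        # Embed both sides and report the mean pairwise cosine similarity.
        emb1 = model.encode(self.sentences1, convert_to_tensor=True)
        emb2 = model.encode(self.sentences2, convert_to_tensor=True)
        score = torch.nn.functional.cosine_similarity(emb1, emb2).mean().item()
        return self.prefix_name_to_metrics({"mean_cosine": score}, self.name)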
|
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SentenceEvaluator:
"""
Base class for all evaluators. Notably, this class introduces the ``greater_is_better`` and ``primary_metric``
attributes. The former is a boolean indicating whether a higher evaluation score is better, which is used
for choosing the best checkpoint if ``load_best_model_at_end`` is set to ``True`` in the training arguments.
The latter is a string indicating the primary metric for the evaluator. This has to be defined whenever
the evaluator returns a dictionary of metrics, and the primary metric is the key pointing to the primary
metric, i.e. the one that is used for model selection and/or logging.
Extend this class and implement __call__ for custom evaluators.
"""
def __init__(self):
self.greater_is_better = True
self.primary_metric = None
def __call__(
        self, model: SentenceTransformer, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> float | dict[str, float]:
"""
This is called during training to evaluate the model.
It returns a score for the evaluation with a higher score indicating a better result.
Args:
model: the model to evaluate
output_path: path where predictions and metrics are written
to
epoch: the epoch where the evaluation takes place. This is
used for the file prefixes. If this is -1, then we
assume evaluation on test data.
steps: the steps in the current epoch at time of the
evaluation. This is used for the file prefixes. If this
is -1, then we assume evaluation at the end of the
epoch.
Returns:
Either a score for the evaluation with a higher score
indicating a better result, or a dictionary with scores. If
the latter is chosen, then `evaluator.primary_metric` must
be defined
"""
pass
def prefix_name_to_metrics(self, metrics: dict[str, float], name: str) -> dict[str, float]:
def maybe_to_float(value: Any) -> Any:
try:
return float(value)
except ValueError:
return value
if not name:
return {key: maybe_to_float(value) for key, value in metrics.items()}
metrics = {name + "_" + key: maybe_to_float(value) for key, value in metrics.items()}
if hasattr(self, "primary_metric") and not self.primary_metric.startswith(name + "_"):
self.primary_metric = name + "_" + self.primary_metric
return metrics
def store_metrics_in_model_card_data(
self, model: SentenceTransformer, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
@property
def description(self) -> str:
"""
Returns a human-readable description of the evaluator: BinaryClassificationEvaluator -> Binary Classification
1. Replace "CE" prefix with "CrossEncoder"
2. Remove "Evaluator" from the class name
3. Add a space before every capital letter
"""
class_name = self.__class__.__name__
if class_name.startswith("CE"):
class_name = "CrossEncoder" + class_name[2:]
try:
index = class_name.index("Evaluator")
class_name = class_name[:index]
        except ValueError:  # str.index raises ValueError if "Evaluator" is absent
pass
return re.sub(r"([a-z])([A-Z])", r"\g<1> \g<2>", class_name)
def get_config_dict(self) -> dict[str, Any]:
"""
Return a dictionary with all meaningful configuration values of the evaluator to store in the model card.
"""
return {}
def embed_inputs(
self,
model: SentenceTransformer,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]:
"""
        Call the ``encode`` method of the model.
Args:
model (SentenceTransformer): Model we are evaluating
sentences (str | list[str] | np.ndarray): Text that we are embedding
Returns:
            list[Tensor] | np.ndarray | Tensor | dict[str, Tensor] | list[dict[str, Tensor]]: The associated embedding
"""
return model.encode(sentences, **kwargs)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
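# Illustration (not part of the original module): plain releases and
# release-candidate strings parse as follows.
if __name__ == '__main__':
    assert parse_version_info('0.10.4') == (0, 10, 4)
    assert parse_version_info('1.0.0rc1') == (1, 0, 0, 'rc1')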
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.10.3'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
        tuple: version information containing the major, minor and micro versions.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(init_cfg=None),
roi_head=dict(
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=8,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=8,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
# [7] yields higher performance than [6]
step=[7])
runner = dict(
type='EpochBasedRunner', max_epochs=8) # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance, initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth' # noqa
|
from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]) -> Dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path) -> None:
pass
@staticmethod
def load(input_path) -> "Normalize":
return Normalize()
|
from typing import Dict
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self):
super(Normalize, self).__init__()
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path):
pass
@staticmethod
def load(input_path):
return Normalize()
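# Usage sketch (hypothetical tensors, not part of the original module):
# after the layer runs, every embedding row has unit L2 norm.
if __name__ == "__main__":
    import torch

    layer = Normalize()
    features = {"sentence_embedding": torch.randn(4, 8)}
    normalized = layer(features)["sentence_embedding"]
    assert torch.allclose(normalized.norm(p=2, dim=1), torch.ones(4), atol=1e-5)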
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
**Author**: `Moto Hira <moto@meta.com>`__
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
    axs.set_title(title or "Spectrogram (dB)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
######################################################################
# TimeMasking
# -----------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ----------------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import librosa
import matplotlib.pyplot as plt
from torchaudio.utils import download_asset
SAMPLE_WAV_SPEECH_PATH = download_asset("tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
    axs.set_title(title or "Spectrogram (dB)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements :py:func:`torchaudio.transforms.TimeStretch`,
# :py:func:`torchaudio.transforms.TimeMasking` and
# :py:func:`torchaudio.transforms.FrequencyMasking`.
#
######################################################################
# TimeStretch
# -----------
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304)
######################################################################
# TimeMasking
# -----------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ----------------
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(
pytest.lazy_fixture('docs_with_text'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 1], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 1], [['c'], 1], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...transformer_tf_text_encode import TransformerTFTextEncoder
target_dim = 768
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.pretrained_model_name_or_path == 'distilbert-base-uncased'
def test_tf_batch(docs_generator):
encoder = TransformerTFTextEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (target_dim,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences['A'] = 'Hello, my name is Michael.'
sentences['B'] = 'Today we are going to Disney World.'
sentences['C'] = 'There are animals on the road'
sentences['D'] = 'A dog is running down the road'
encoder = TransformerTFTextEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('C', 'D')
assert small_distance < dist('C', 'B')
assert small_distance < dist('C', 'A')
assert small_distance < dist('B', 'A')
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_path'],
[
(
pytest.lazy_fixture('docs_with_text'),
[[['r'], 10], [['c'], 0], [['cc'], 0]],
['r'],
),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[[['r'], 0], [['c'], 10], [['cc'], 0]],
['c'],
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[[['r'], 0], [['c'], 0], [['cc'], 10]],
['cc'],
),
],
)
def test_traversal_path(docs: DocumentArray, docs_per_path, traversal_path):
encoder = TransformerTFTextEncoder()
encoder.encode(docs, parameters={'traversal_paths': traversal_path})
for path, count in docs_per_path:
assert len(docs.traverse_flat(path).get_attributes("embedding")) == count
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert len(video_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert len(video_bytes) > 0
|
"""Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_resolve_criteria_str() -> None:
assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
}
assert CriteriaEvalChain.resolve_criteria("correctness") == {
"correctness": _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]
}
@pytest.mark.parametrize(
"text,want",
[
("Y", {"reasoning": "", "value": "Y", "score": 1}),
(
"""Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.
Y""",
{
"reasoning": """Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.""", # noqa: E501
"value": "Y",
"score": 1,
},
),
(
" NThe submission N is correct, accurate, and factual. It accurately"
" identifies the specific effects of knowledge and interest on"
" these factors. Therefore, the submission Y meets the criteria. Y",
{
"reasoning": "NThe submission N is correct, accurate, and factual. It"
" accurately identifies the specific effects of knowledge and interest"
" on these factors. Therefore, the submission Y meets the criteria.",
"value": "Y",
"score": 1,
},
),
],
)
def test_CriteriaResultOutputParser_parse(text: str, want: dict) -> None:
output_parser = CriteriaResultOutputParser()
got = output_parser.parse(text)
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {
criterion.value: _SUPPORTED_CRITERIA[criterion]
}
def test_criteria_eval_chain() -> None:
chain = CriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"}, sequential_responses=True
),
criteria={"my criterion": "my criterion description"},
)
with pytest.warns(UserWarning, match=chain._skip_reference_warning):
result = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert result["reasoning"] == "The meaning of life"
def test_criteria_eval_chain_missing_reference() -> None:
chain = LabeledCriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction="my prediction", input="my input")
def test_implements_string_protocol() -> None:
assert issubclass(CriteriaEvalChain, StringEvaluator)
|
"""Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_resolve_criteria_str() -> None:
# type: ignore
assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
}
assert CriteriaEvalChain.resolve_criteria("correctness") == {
"correctness": _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]
}
@pytest.mark.parametrize(
"text,want",
[
("Y", {"reasoning": "", "value": "Y", "score": 1}),
(
"""Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.
Y""",
{
"reasoning": """Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.""", # noqa: E501
"value": "Y",
"score": 1,
},
),
(
" NThe submission N is correct, accurate, and factual. It accurately"
" identifies the specific effects of knowledge and interest on"
" these factors. Therefore, the submission Y meets the criteria. Y",
{
"reasoning": "NThe submission N is correct, accurate, and factual. It"
" accurately identifies the specific effects of knowledge and interest"
" on these factors. Therefore, the submission Y meets the criteria.",
"value": "Y",
"score": 1,
},
),
],
)
def test_CriteriaResultOutputParser_parse(text: str, want: dict) -> None:
output_parser = CriteriaResultOutputParser()
got = output_parser.parse(text)
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {
criterion.value: _SUPPORTED_CRITERIA[criterion]
}
def test_criteria_eval_chain() -> None:
chain = CriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"}, sequential_responses=True
),
criteria={"my criterion": "my criterion description"},
)
with pytest.warns(UserWarning, match=chain._skip_reference_warning):
result = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert result["reasoning"] == "The meaning of life"
def test_criteria_eval_chain_missing_reference() -> None:
chain = LabeledCriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction="my prediction", input="my input")
def test_implements_string_protocol() -> None:
assert issubclass(CriteriaEvalChain, StringEvaluator)
|
_base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(as_two_stage=True)
|
_base_ = 'deformable-detr_refine_r50_16xb2-50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit, UsageTransactionMetadata
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
reason=f"Ran block {entry.block_id} {block.name}",
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
reason=f"Ran block {entry.block_id} {block.name}",
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# set the calendar to month 2 but use current time from now
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
import logging
import tempfile
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.models
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
import backend.util.json
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(prefix="/admin", tags=["store", "admin"])
@router.get(
"/listings",
summary="Get Admin Listings History",
response_model=backend.server.v2.store.model.StoreListingsWithVersionsResponse,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def get_admin_listings_with_versions(
status: typing.Optional[prisma.enums.SubmissionStatus] = None,
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
):
"""
Get store listings with their version history for admins.
This provides a consolidated view of listings with their versions,
allowing for an expandable UI in the admin dashboard.
Args:
status: Filter by submission status (PENDING, APPROVED, REJECTED)
search: Search by name, description, or user email
page: Page number for pagination
page_size: Number of items per page
Returns:
StoreListingsWithVersionsResponse with listings and their versions
"""
try:
listings = await backend.server.v2.store.db.get_admin_listings_with_versions(
status=status,
search_query=search,
page=page,
page_size=page_size,
)
return listings
except Exception as e:
logger.exception("Error getting admin listings with versions: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={
"detail": "An error occurred while retrieving listings with versions"
},
)
@router.post(
"/submissions/{store_listing_version_id}/review",
summary="Review Store Submission",
response_model=backend.server.v2.store.model.StoreSubmission,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def review_submission(
store_listing_version_id: str,
request: backend.server.v2.store.model.ReviewSubmissionRequest,
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
):
"""
Review a store listing submission.
Args:
store_listing_version_id: ID of the submission to review
request: Review details including approval status and comments
user: Authenticated admin user performing the review
Returns:
StoreSubmission with updated review information
"""
try:
submission = await backend.server.v2.store.db.review_store_submission(
store_listing_version_id=store_listing_version_id,
is_approved=request.is_approved,
external_comments=request.comments,
internal_comments=request.internal_comments or "",
reviewer_id=user.user_id,
)
return submission
except Exception as e:
logger.exception("Error reviewing submission: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while reviewing the submission"},
)
@router.get(
"/submissions/download/{store_listing_version_id}",
summary="Admin Download Agent File",
tags=["store", "admin"],
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def admin_download_agent_file(
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
store_listing_version_id: str = fastapi.Path(
..., description="The ID of the agent to download"
),
) -> fastapi.responses.FileResponse:
"""
Download the agent file by streaming its content.
Args:
store_listing_version_id (str): The ID of the agent to download
Returns:
        FileResponse: A file response containing the agent's graph data as JSON.
Raises:
HTTPException: If the agent is not found or an unexpected error occurs.
"""
graph_data = await backend.server.v2.store.db.get_agent(
user_id=user.user_id,
store_listing_version_id=store_listing_version_id,
)
file_name = f"agent_{graph_data.id}_v{graph_data.version or 'latest'}.json"
# Sending graph as a stream (similar to marketplace v1)
with tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
) as tmp_file:
tmp_file.write(backend.util.json.dumps(graph_data))
tmp_file.flush()
return fastapi.responses.FileResponse(
tmp_file.name, filename=file_name, media_type="application/json"
)
|
import logging
import tempfile
import typing
import autogpt_libs.auth.depends
import autogpt_libs.auth.models
import fastapi
import fastapi.responses
import prisma.enums
import backend.server.v2.store.db
import backend.server.v2.store.exceptions
import backend.server.v2.store.model
import backend.util.json
logger = logging.getLogger(__name__)
router = fastapi.APIRouter(prefix="/admin", tags=["store", "admin"])
@router.get(
"/listings",
response_model=backend.server.v2.store.model.StoreListingsWithVersionsResponse,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def get_admin_listings_with_versions(
status: typing.Optional[prisma.enums.SubmissionStatus] = None,
search: typing.Optional[str] = None,
page: int = 1,
page_size: int = 20,
):
"""
Get store listings with their version history for admins.
This provides a consolidated view of listings with their versions,
allowing for an expandable UI in the admin dashboard.
Args:
status: Filter by submission status (PENDING, APPROVED, REJECTED)
search: Search by name, description, or user email
page: Page number for pagination
page_size: Number of items per page
Returns:
StoreListingsWithVersionsResponse with listings and their versions
"""
try:
listings = await backend.server.v2.store.db.get_admin_listings_with_versions(
status=status,
search_query=search,
page=page,
page_size=page_size,
)
return listings
except Exception as e:
logger.exception("Error getting admin listings with versions: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={
"detail": "An error occurred while retrieving listings with versions"
},
)
@router.post(
"/submissions/{store_listing_version_id}/review",
response_model=backend.server.v2.store.model.StoreSubmission,
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def review_submission(
store_listing_version_id: str,
request: backend.server.v2.store.model.ReviewSubmissionRequest,
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
):
"""
Review a store listing submission.
Args:
store_listing_version_id: ID of the submission to review
request: Review details including approval status and comments
user: Authenticated admin user performing the review
Returns:
StoreSubmission with updated review information
"""
try:
submission = await backend.server.v2.store.db.review_store_submission(
store_listing_version_id=store_listing_version_id,
is_approved=request.is_approved,
external_comments=request.comments,
internal_comments=request.internal_comments or "",
reviewer_id=user.user_id,
)
return submission
except Exception as e:
logger.exception("Error reviewing submission: %s", e)
return fastapi.responses.JSONResponse(
status_code=500,
content={"detail": "An error occurred while reviewing the submission"},
)
@router.get(
"/submissions/download/{store_listing_version_id}",
tags=["store", "admin"],
dependencies=[fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user)],
)
async def admin_download_agent_file(
user: typing.Annotated[
autogpt_libs.auth.models.User,
fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
],
store_listing_version_id: str = fastapi.Path(
..., description="The ID of the agent to download"
),
) -> fastapi.responses.FileResponse:
"""
Download the agent file by streaming its content.
Args:
store_listing_version_id (str): The ID of the agent to download
Returns:
        FileResponse: A file response containing the agent's graph data as JSON.
Raises:
HTTPException: If the agent is not found or an unexpected error occurs.
"""
graph_data = await backend.server.v2.store.db.get_agent(
user_id=user.user_id,
store_listing_version_id=store_listing_version_id,
)
file_name = f"agent_{graph_data.id}_v{graph_data.version or 'latest'}.json"
# Sending graph as a stream (similar to marketplace v1)
with tempfile.NamedTemporaryFile(
mode="w", suffix=".json", delete=False
) as tmp_file:
tmp_file.write(backend.util.json.dumps(graph_data))
tmp_file.flush()
return fastapi.responses.FileResponse(
tmp_file.name, filename=file_name, media_type="application/json"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
        hflag (bool): Whether to use heatmap-guided pasting. Default False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
            # TODO: a more fundamental bug needs to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
ori_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(ori_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
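# Usage sketch (editor note; assumes an MMDetection-style config with
# COCO-format annotations): InstaBoost consumes results['ann_info'], so it is
# placed after image loading and before annotation formatting, e.g.
#
#     train_pipeline = [
#         dict(type='LoadImageFromFile'),
#         dict(type='InstaBoost', action_prob=(1, 0, 0), aug_ratio=0.5),
#         dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
#     ]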
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from ..builder import PIPELINES
@PIPELINES.register_module()
class InstaBoost:
r"""Data augmentation method in `InstaBoost: Boosting Instance
Segmentation Via Probability Map Guided Copy-Pasting
<https://arxiv.org/abs/1908.07801>`_.
Refer to https://github.com/GothicAi/Instaboost for implementation details.
Args:
action_candidate (tuple): Action candidates. "normal", "horizontal", \
"vertical", "skip" are supported. Default: ('normal', \
'horizontal', 'skip').
action_prob (tuple): Corresponding action probabilities. Should be \
the same length as action_candidate. Default: (1, 0, 0).
scale (tuple): (min scale, max scale). Default: (0.8, 1.2).
dx (int): The maximum x-axis shift will be (instance width) / dx.
Default 15.
dy (int): The maximum y-axis shift will be (instance height) / dy.
Default 15.
theta (tuple): (min rotation degree, max rotation degree). \
Default: (-1, 1).
color_prob (float): Probability of images for color augmentation.
Default 0.5.
        hflag (bool): Whether to use heatmap-guided copy-pasting. Default: False.
aug_ratio (float): Probability of applying this transformation. \
Default 0.5.
"""
def __init__(self,
action_candidate=('normal', 'horizontal', 'skip'),
action_prob=(1, 0, 0),
scale=(0.8, 1.2),
dx=15,
dy=15,
theta=(-1, 1),
color_prob=0.5,
hflag=False,
aug_ratio=0.5):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError(
'Please run "pip install instaboostfast" '
'to install instaboostfast first for instaboost augmentation.')
self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
scale, dx, dy, theta,
color_prob, hflag)
self.aug_ratio = aug_ratio
def _load_anns(self, results):
labels = results['ann_info']['labels']
masks = results['ann_info']['masks']
bboxes = results['ann_info']['bboxes']
n = len(labels)
anns = []
for i in range(n):
label = labels[i]
bbox = bboxes[i]
mask = masks[i]
x1, y1, x2, y2 = bbox
# assert (x2 - x1) >= 1 and (y2 - y1) >= 1
bbox = [x1, y1, x2 - x1, y2 - y1]
anns.append({
'category_id': label,
'segmentation': mask,
'bbox': bbox
})
return anns
def _parse_anns(self, results, anns, img):
gt_bboxes = []
gt_labels = []
gt_masks_ann = []
for ann in anns:
x1, y1, w, h = ann['bbox']
            # TODO: a more fundamental bug needs to be fixed in instaboost
if w <= 0 or h <= 0:
continue
bbox = [x1, y1, x1 + w, y1 + h]
gt_bboxes.append(bbox)
gt_labels.append(ann['category_id'])
gt_masks_ann.append(ann['segmentation'])
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
results['ann_info']['labels'] = gt_labels
results['ann_info']['bboxes'] = gt_bboxes
results['ann_info']['masks'] = gt_masks_ann
results['img'] = img
return results
def __call__(self, results):
img = results['img']
orig_type = img.dtype
anns = self._load_anns(results)
if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
try:
import instaboostfast as instaboost
except ImportError:
raise ImportError('Please run "pip install instaboostfast" '
'to install instaboostfast first.')
anns, img = instaboost.get_new_data(
anns, img.astype(np.uint8), self.cfg, background=None)
results = self._parse_anns(results, anns, img.astype(orig_type))
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
return repr_str
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
        *AUDIO_FILES,
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('audio_url')
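# Note (editor sketch): AudioUrl.load() returns an AudioNdArray; assigning it to
# the AudioTorchTensor-annotated field above presumably relies on pydantic field
# validation to coerce the numpy array into a torch-backed tensor, which is what
# test_load_audio_url_to_audio_torch_tensor_field asserts.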
|
from typing import Optional
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.document.io.json import orjson_dumps
from docarray.typing import AudioNdArray, AudioTorchTensor, AudioUrl
from tests import TOYDATA_DIR
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor = uri.load()
assert isinstance(tensor, np.ndarray)
assert isinstance(tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDocument):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor = doc.audio_url.load()
assert isinstance(doc.tensor, np.ndarray)
assert isinstance(doc.tensor, AudioNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
        *AUDIO_FILES,
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('audio_url')
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
to_float32=True,
backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False,
backend_args={{_base_.backend_args}})
test_evaluator = val_evaluator
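# Note (editor sketch): this instance-segmentation variant reuses the panoptic
# base config but disables panoptic output (num_stuff_classes=0,
# test_cfg.panoptic_on=False) and keeps large-scale jittering: RandomResize into
# ratio_range=(0.1, 2.0) followed by an absolute RandomCrop back to image_size.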
|
_base_ = ['./mask2former_r50_8xb2-lsj-50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
image_size = (1024, 1024)
batch_augments = [
dict(
type='BatchFixedSizePad',
size=image_size,
img_pad_value=0,
pad_mask=True,
mask_pad_value=0,
pad_seg=False)
]
data_preprocessor = dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
pad_mask=True,
mask_pad_value=0,
pad_seg=False,
batch_augments=batch_augments)
model = dict(
data_preprocessor=data_preprocessor,
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
# large scale jittering
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
resize_type='Resize',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
# If you don't have a gt annotation, delete the pipeline
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
pipeline=train_pipeline))
val_dataloader = dict(
dataset=dict(
type=dataset_type,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
_delete_=True,
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
|
from typing import Dict, Iterable
import torch
from torch import Tensor, nn
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is simply matched against its positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
import torch
from torch import nn, Tensor
from typing import Iterable, Dict
from ..SentenceTransformer import SentenceTransformer
from .. import util
class MultipleNegativesSymmetricRankingLoss(nn.Module):
def __init__(self, model: SentenceTransformer, scale: float = 20.0, similarity_fct=util.cos_sim):
"""
This loss is an adaptation of MultipleNegativesRankingLoss. MultipleNegativesRankingLoss computes the following loss:
For a given anchor and a list of candidates, find the positive candidate.
In MultipleNegativesSymmetricRankingLoss, we add another loss term: Given the positive and a list of all anchors,
find the correct (matching) anchor.
For the example of question-answering: You have (question, answer)-pairs. MultipleNegativesRankingLoss just computes
the loss to find the answer for a given question. MultipleNegativesSymmetricRankingLoss additionally computes the
loss to find the question for a given answer.
        Note: If you pass triplets, the negative entry will be ignored. An anchor is simply matched against its positive.
Args:
model: SentenceTransformer model
scale: Output of similarity function is multiplied by scale
value
similarity_fct: similarity function between sentence
embeddings. By default, cos_sim. Can also be set to dot
product (and then set scale to 1)
Requirements:
1. (anchor, positive) pairs
Relations:
- Like :class:`MultipleNegativesRankingLoss`, but with an additional loss term.
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
})
loss = losses.MultipleNegativesSymmetricRankingLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super(MultipleNegativesSymmetricRankingLoss, self).__init__()
self.model = model
self.scale = scale
self.similarity_fct = similarity_fct
self.cross_entropy_loss = nn.CrossEntropyLoss()
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
anchor = reps[0]
candidates = torch.cat(reps[1:])
scores = self.similarity_fct(anchor, candidates) * self.scale
labels = torch.tensor(
range(len(scores)), dtype=torch.long, device=scores.device
) # Example a[i] should match with b[i]
anchor_positive_scores = scores[:, 0 : len(reps[1])]
forward_loss = self.cross_entropy_loss(scores, labels)
backward_loss = self.cross_entropy_loss(anchor_positive_scores.transpose(0, 1), labels)
return (forward_loss + backward_loss) / 2
def get_config_dict(self):
return {"scale": self.scale, "similarity_fct": self.similarity_fct.__name__}
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* :cite:`Mir2015QUESST2014EQ` Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
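# Usage sketch (editor note; the root path is hypothetical, and the first call
# downloads and extracts the archive when download=True):
#
#     dataset = QUESST14("/data", subset="dev", language="nnenglish", download=True)
#     waveform, sample_rate, name = dataset[0]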
|
import os
import re
from pathlib import Path
from typing import Optional, Tuple, Union
import torch
import torchaudio
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
URL = "https://speech.fit.vutbr.cz/files/quesst14Database.tgz"
_CHECKSUM = "4f869e06bc066bbe9c5dde31dbd3909a0870d70291110ebbb38878dcbc2fc5e4"
_LANGUAGES = [
"albanian",
"basque",
"czech",
"nnenglish",
"romanian",
"slovak",
]
class QUESST14(Dataset):
"""Create *QUESST14* [:footcite:`Mir2015QUESST2014EQ`] Dataset
Args:
root (str or Path): Root directory where the dataset's top level directory is found
subset (str): Subset of the dataset to use. Options: [``"docs"``, ``"dev"``, ``"eval"``].
language (str or None, optional): Language to get dataset for.
Options: [``None``, ``albanian``, ``basque``, ``czech``, ``nnenglish``, ``romanian``, ``slovak``].
If ``None``, dataset consists of all languages. (default: ``"nnenglish"``)
download (bool, optional): Whether to download the dataset if it is not found at root path.
(default: ``False``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str,
language: Optional[str] = "nnenglish",
download: bool = False,
) -> None:
if subset not in ["docs", "dev", "eval"]:
raise ValueError("`subset` must be one of ['docs', 'dev', 'eval']")
if language is not None and language not in _LANGUAGES:
raise ValueError(f"`language` must be None or one of {str(_LANGUAGES)}")
# Get string representation of 'root'
root = os.fspath(root)
basename = os.path.basename(URL)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
self._path = os.path.join(root, basename)
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
download_url_to_file(URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
if subset == "docs":
self.data = filter_audio_paths(self._path, language, "language_key_utterances.lst")
elif subset == "dev":
self.data = filter_audio_paths(self._path, language, "language_key_dev.lst")
elif subset == "eval":
self.data = filter_audio_paths(self._path, language, "language_key_eval.lst")
def _load_sample(self, n: int) -> Tuple[torch.Tensor, int, str]:
audio_path = self.data[n]
wav, sample_rate = torchaudio.load(audio_path)
return wav, sample_rate, audio_path.with_suffix("").name
def __getitem__(self, n: int) -> Tuple[torch.Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, file_name)``
"""
return self._load_sample(n)
def __len__(self) -> int:
return len(self.data)
def filter_audio_paths(
path: str,
    language: Optional[str],
lst_name: str,
):
"""Extract audio paths for the given language."""
audio_paths = []
path = Path(path)
with open(path / "scoring" / lst_name) as f:
for line in f:
audio_path, lang = line.strip().split()
if language is not None and lang != language:
continue
audio_path = re.sub(r"^.*?\/", "", audio_path)
audio_paths.append(path / audio_path)
return audio_paths
|
import os
import random
import time
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.NumpyLMDBSearcher.npfile import NumpyLMDBSearcher
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
from tests.integration.psql_dump_reload.test_dump_psql import (
MatchMerger,
)
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 30
class TagMatchMerger(MatchMerger):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
MatchMerger.merge(
self, docs_matrix=docs_matrix, parameters=parameters, **kwargs
)
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class NumpyTaggingFileSearcher(NumpyLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim])
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
# TODO: add num_shards=7
@pytest.mark.parametrize('num_shards', (2, 3))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
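# Note (editor sketch): flow.yml is not shown here; the test assumes it defines
# an 'index' pod wrapping NumpyTaggingFileSearcher sharded via ${SHARDS} plus a
# TagMatchMerger, so that after rolling_update each match carries the pea_id of
# its shard and validate_diff_sources sees every shard represented among the
# TOP_K results.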
|
import os
import random
import time
from typing import Dict
import numpy as np
import pytest
from jina import Document, Flow, DocumentArray, requests
from jina_commons.indexers.dump import dump_docs
from jinahub.indexers.searcher.compound.NumpyLMDBSearcher import NumpyLMDBSearcher
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
from tests.integration.psql_dump_reload.test_dump_psql import (
MatchMerger,
)
random.seed(0)
np.random.seed(0)
cur_dir = os.path.dirname(os.path.abspath(__file__))
ORIGIN_TAG = 'origin'
TOP_K = 30
class TagMatchMerger(MatchMerger):
@requests(on='/tag_search')
def merge(self, docs_matrix, parameters: Dict, **kwargs):
MatchMerger.merge(
self, docs_matrix=docs_matrix, parameters=parameters, **kwargs
)
class TaggingFileSearcher(LMDBStorage):
def __init__(
self,
**kwargs,
):
super().__init__(**kwargs)
def search(self, docs: DocumentArray, parameters: Dict = None, **kwargs) -> None:
# TODO shouldn't be necessary
parameters = {'traversal_paths': ['m']}
LMDBStorage.search(self, docs, parameters=parameters, **kwargs)
for doc in docs:
for match in doc.matches:
match.tags[ORIGIN_TAG] = self.runtime_args.pea_id
class NumpyTaggingFileSearcher(NumpyLMDBSearcher):
def __init__(
self,
dump_path=None,
**kwargs,
):
super().__init__(**kwargs)
self._kv_indexer = TaggingFileSearcher(dump_path=dump_path, **kwargs)
@requests(on='/tag_search')
def search(self, docs: 'DocumentArray', parameters: Dict = None, **kwargs):
super().search(docs, parameters, **kwargs)
def random_docs(start, end, embed_dim=10):
for j in range(start, end):
d = Document()
d.content = f'hello world from {j}'
d.embedding = np.random.random([embed_dim])
yield d
def validate_diff_sources(results, num_shards, docs_before: DocumentArray):
distinct_shards = {}
for doc in results[0].docs:
for match in doc.matches:
if match.tags[ORIGIN_TAG] not in distinct_shards:
distinct_shards[match.tags[ORIGIN_TAG]] = 0
distinct_shards[match.tags[ORIGIN_TAG]] += 1
np.testing.assert_equal(len(distinct_shards.keys()), num_shards)
np.testing.assert_equal(sum(distinct_shards.values()), TOP_K)
# TODO we do not support shards=1 for replicas>1
def assert_folder(dump_path, num_shards):
assert os.path.exists(dump_path)
for i in range(num_shards):
assert os.path.exists(os.path.join(dump_path, str(i)))
assert os.path.exists(os.path.join(dump_path, str(i), 'ids'))
assert os.path.exists(os.path.join(dump_path, str(i), 'vectors'))
assert os.path.exists(os.path.join(dump_path, str(i), 'metas'))
# TODO: add num_shards=7
@pytest.mark.parametrize('num_shards', (2, 3))
def test_shards_numpy_filequery(tmpdir, num_shards):
pod_name = 'index'
os.environ['WORKSPACE'] = str(tmpdir)
os.environ['SHARDS'] = str(num_shards)
docs_indexed = list(random_docs(0, 201))
dump_path = os.path.join(tmpdir, 'dump_path')
dump_docs(docs_indexed, dump_path, num_shards)
assert_folder(dump_path, num_shards)
inputs = list(random_docs(0, 1))
# TODO workspace is wrongly saved to curdir
with Flow.load_config('flow.yml') as flow:
flow.rolling_update(pod_name=pod_name, dump_path=dump_path)
time.sleep(2)
results = flow.post(
on='/tag_search',
inputs=inputs,
parameters={'top_k': TOP_K},
return_results=True,
)
validate_diff_sources(results, num_shards, docs_indexed)
|
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING:
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
from docarray import Document
ArrayType = TypeVar(
'ArrayType',
np.ndarray,
scipy.sparse.spmatrix,
tensorflow.SparseTensor,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
DocumentContentType = Union[bytes, str, ArrayType]
ProtoValueType = Optional[Union[str, bool, float]]
StructValueType = Union[
ProtoValueType, List[ProtoValueType], Dict[str, ProtoValueType]
]
DocumentArraySourceType = Union[
Sequence[Document], Document, Generator[Document], Iterable[Document]
]
T = TypeVar('T')
AnyDNN = TypeVar(
'AnyDNN'
) #: The type of any implementation of a Deep Neural Network object
DocumentArraySingletonIndexType = Union[int, str]
DocumentArrayMultipleIndexType = Union[
slice, Sequence[int], Sequence[str], Sequence[bool], Ellipsis
]
DocumentArraySingleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType], str
]
DocumentArrayMultipleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType],
Sequence[str],
]
DocumentArrayIndexType = Union[
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArraySingleAttributeType,
DocumentArrayMultipleAttributeType,
]
Image = TypeVar(
'Image',
str,
ForwardRef('np.ndarray'),
ForwardRef('PILImage'),
)
Text = TypeVar('Text', bound=str)
Audio = TypeVar('Audio', str, ForwardRef('np.ndarray'))
Video = TypeVar('Video', str, ForwardRef('np.ndarray'))
Mesh = TypeVar('Mesh', str, ForwardRef('np.ndarray'))
Tabular = TypeVar('Tabular', bound=str)
Blob = TypeVar('Blob', str, bytes)
JSON = TypeVar('JSON', str, dict)
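# Illustration (editor sketch, assuming a DocumentArray `da`): the index aliases
# above cover the supported access patterns, e.g.
#
#     da[0]                   # DocumentArraySingletonIndexType (int)
#     da['doc_id']            # DocumentArraySingletonIndexType (str)
#     da[1:3]                 # DocumentArrayMultipleIndexType (slice)
#     da[[0, 2], 'text']      # DocumentArraySingleAttributeType
#     da[:, ['text', 'id']]   # DocumentArrayMultipleAttributeType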
|
from typing import (
Union,
TYPE_CHECKING,
TypeVar,
Sequence,
Optional,
List,
Dict,
Generator,
Iterable,
Tuple,
ForwardRef,
)
if TYPE_CHECKING:
import scipy.sparse
import tensorflow
import torch
import numpy as np
from PIL.Image import Image as PILImage
from .. import Document
ArrayType = TypeVar(
'ArrayType',
np.ndarray,
scipy.sparse.spmatrix,
tensorflow.SparseTensor,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
DocumentContentType = Union[bytes, str, ArrayType]
ProtoValueType = Optional[Union[str, bool, float]]
StructValueType = Union[
ProtoValueType, List[ProtoValueType], Dict[str, ProtoValueType]
]
DocumentArraySourceType = Union[
Sequence[Document], Document, Generator[Document], Iterable[Document]
]
T = TypeVar('T')
AnyDNN = TypeVar(
'AnyDNN'
) #: The type of any implementation of a Deep Neural Network object
DocumentArraySingletonIndexType = Union[int, str]
DocumentArrayMultipleIndexType = Union[
slice, Sequence[int], Sequence[str], Sequence[bool], Ellipsis
]
DocumentArraySingleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType], str
]
DocumentArrayMultipleAttributeType = Tuple[
Union[DocumentArraySingletonIndexType, DocumentArrayMultipleIndexType],
Sequence[str],
]
DocumentArrayIndexType = Union[
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArraySingleAttributeType,
DocumentArrayMultipleAttributeType,
]
Image = TypeVar(
'Image',
str,
ForwardRef('np.ndarray'),
ForwardRef('PILImage'),
)
Text = TypeVar('Text', bound=str)
Audio = TypeVar('Audio', str, ForwardRef('np.ndarray'))
Video = TypeVar('Video', str, ForwardRef('np.ndarray'))
Mesh = TypeVar('Mesh', str, ForwardRef('np.ndarray'))
Tabular = TypeVar('Tabular', bound=str)
Blob = TypeVar('Blob', str, bytes)
JSON = TypeVar('JSON', str, dict)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FileManagementToolkit",
]
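# Usage sketch (editor note): importing FileManagementToolkit from this legacy
# module resolves through __getattr__/create_importer and emits a deprecation
# warning pointing at langchain_community.agent_toolkits.file_management.toolkit,
# e.g.
#
#     toolkit = FileManagementToolkit(root_dir="/tmp/sandbox")  # root_dir value is illustrative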
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
)
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FileManagementToolkit",
]
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[Image], **kwargs) -> DocList[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocList[Image]([Image(url='https://via.placeholder.com/150.png')]),
return_type=DocList[Image],
)
docs = docs.to_doc_vec()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocList[MyDoc],
response_schema=DocList[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[MyDoc], **kwargs) -> DocList[MyDoc]:
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/bar', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
def test_deployments():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
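# Note (editor sketch): to_doc_vec() column-stacks the DocList so that
# `docs.tensor` becomes one batched tensor; with N same-sized images that yields
# ndim == 4 (N, H, W, C), which is what test_different_document_schema asserts.
# The paired variant below expresses the same step with the older stack()
# spelling.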
|
from typing import Optional
import numpy as np
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import AnyTensor, ImageUrl
from jina import Deployment, Executor, Flow, requests
def test_different_document_schema():
class Image(BaseDoc):
tensor: Optional[AnyTensor]
url: ImageUrl
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[Image], **kwargs) -> DocList[Image]:
for doc in docs:
doc.tensor = doc.url.load()
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/foo',
inputs=DocList[Image]([Image(url='https://via.placeholder.com/150.png')]),
return_type=DocList[Image],
)
docs = docs.stack()
assert docs.tensor.ndim == 4
def test_send_custom_doc():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/foo')
def foo(self, docs: DocList[MyDoc], **kwargs):
docs[0].text = 'hello world'
with Flow().add(uses=MyExec) as f:
doc = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert doc[0].text == 'hello world'
def test_input_response_schema():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(
on='/foo',
request_schema=DocList[MyDoc],
response_schema=DocList[MyDoc],
)
def foo(self, docs, **kwargs):
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/foo', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_input_response_schema_annotation():
class MyDoc(BaseDoc):
text: str
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[MyDoc], **kwargs) -> DocList[MyDoc]:
assert docs.__class__.doc_type == MyDoc
docs[0].text = 'hello world'
return docs
with Flow().add(uses=MyExec) as f:
docs = f.post(on='/bar', inputs=MyDoc(text='hello'), return_type=DocList[MyDoc])
assert docs[0].text == 'hello world'
assert docs.__class__.doc_type == MyDoc
def test_different_output_input():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Flow().add(uses=MyExec) as f:
docs = f.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
def test_deployments():
class InputDoc(BaseDoc):
img: ImageDoc
class OutputDoc(BaseDoc):
embedding: AnyTensor
class MyExec(Executor):
@requests(on='/bar')
def bar(self, docs: DocList[InputDoc], **kwargs) -> DocList[OutputDoc]:
docs_return = DocList[OutputDoc](
[OutputDoc(embedding=np.zeros((100, 1))) for _ in range(len(docs))]
)
return docs_return
with Deployment(uses=MyExec) as dep:
docs = dep.post(
on='/bar',
inputs=InputDoc(img=ImageDoc(tensor=np.zeros((3, 224, 224)))),
return_type=DocList[OutputDoc],
)
assert docs[0].embedding.shape == (100, 1)
assert docs.__class__.doc_type == OutputDoc
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/dev-2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
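# Cross-referencing sketch (editor note): with the intersphinx mapping above,
# docstrings and source pages can link into external projects, e.g.
# :class:`torch.Tensor` or :func:`numpy.mean` resolve to the upstream docs.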
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'sphinx_markdown_tables',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'cn',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
import json
import pytest
import types
from requests import Response
from unittest import mock
from typing import Optional, Type
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.siliconflow import SiliconFlowEmbedding
class MockAsyncResponse:
def __init__(self, json_data) -> None:
self._json_data = json_data
def raise_for_status(self) -> None:
...
async def __aenter__(self) -> "MockAsyncResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[types.TracebackType],
) -> None:
pass
async def json(self) -> dict:
return self._json_data
def test_embedding_class():
emb = SiliconFlowEmbedding()
assert isinstance(emb, BaseEmbedding)
def test_float_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
def test_base64_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": "AAD2Qg==", "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...", encoding_format="base64")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "base64",
},
headers=embedding._headers,
)
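def test_base64_payload_decodes_to_float():
    # Editor sanity check (added): the fixture payload "AAD2Qg==" used above is
    # the little-endian float32 encoding of 123.0, so the base64 test's
    # expected_result of [123] is internally consistent.
    import base64
    import numpy as np
    decoded = np.frombuffer(base64.b64decode("AAD2Qg=="), dtype="<f4")
    assert decoded.tolist() == [123.0]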
@pytest.mark.asyncio
async def test_float_format_embedding_async():
input_text = "..."
mock_response = MockAsyncResponse(
json_data={
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
)
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch(
"aiohttp.ClientSession.post", return_value=mock_response
) as mock_post:
actual_result = await embedding.aget_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
|
import json
import pytest
import types
from requests import Response
from unittest import mock
from typing import Optional, Type
from llama_index.core.embeddings import BaseEmbedding
from llama_index.embeddings.siliconflow import SiliconFlowEmbedding
class MockAsyncResponse:
def __init__(self, json_data) -> None:
self._json_data = json_data
def raise_for_status(self) -> None:
...
async def __aenter__(self) -> "MockAsyncResponse":
return self
async def __aexit__(
self,
exc_type: Optional[Type[BaseException]],
exc: Optional[BaseException],
tb: Optional[types.TracebackType],
) -> None:
pass
async def json(self) -> dict:
return self._json_data
def test_embedding_class():
emb = SiliconFlowEmbedding()
assert isinstance(emb, BaseEmbedding)
def test_float_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
def test_base64_format_embedding():
input_text = "..."
mock_response = Response()
mock_response._content = json.dumps(
{
"model": "<string>",
"data": [{"object": "embedding", "embedding": "AAD2Qg==", "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
).encode("utf-8")
embedding = SiliconFlowEmbedding(api_key="...", encoding_format="base64")
with mock.patch("requests.Session.post", return_value=mock_response) as mock_post:
actual_result = embedding.get_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "base64",
},
headers=embedding._headers,
)
@pytest.mark.asyncio()
async def test_float_format_embedding_async():
input_text = "..."
mock_response = MockAsyncResponse(
json_data={
"model": "<string>",
"data": [{"object": "embedding", "embedding": [123], "index": 0}],
"usage": {
"prompt_tokens": 123,
"completion_tokens": 123,
"total_tokens": 123,
},
}
)
embedding = SiliconFlowEmbedding(api_key="...")
with mock.patch(
"aiohttp.ClientSession.post", return_value=mock_response
) as mock_post:
actual_result = await embedding.aget_query_embedding(input_text)
expected_result = [123]
assert actual_result == expected_result
mock_post.assert_called_once_with(
embedding.base_url,
json={
"model": embedding.model,
"input": [input_text],
"encoding_format": "float",
},
headers=embedding._headers,
)
|
import os
import time
import pytest
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise Exception('no available port')
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
time.sleep(5)
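# Note (editorial): the fixed time.sleep(5) gives docker-compose a crude
# readiness/teardown buffer; polling a health check would be more robust, but
# this keeps the fixture dependency-free.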
|
import os
import time
import pytest
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise Exception('no available port')
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(5)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
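# Note (editorial): toggling the name-mangled `_DataLoader__initialized` flag is
# how the hook forces a dataloader rebuild when persistent_workers is on,
# presumably so the workers pick up the switched training pipeline; the asserts
# above track that one-off restart and re-initialization.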
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_initialize_after_switching(self, mock_is_model_wrapper):
# This simulates resuming training after the switch.
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 285
runner.max_epochs = 300
# epoch + 1 > max_epochs - num_last_epochs.
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock, patch
from mmdet.engine.hooks import YOLOXModeSwitchHook
class TestYOLOXModeSwitchHook(TestCase):
@patch('mmdet.engine.hooks.yolox_mode_switch_hook.is_model_wrapper')
def test_is_model_wrapper_and_persistent_workers_on(
self, mock_is_model_wrapper):
mock_is_model_wrapper.return_value = True
runner = Mock()
runner.model = Mock()
runner.model.module = Mock()
runner.model.module.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = True
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertTrue(hook._restart_dataloader)
self.assertTrue(runner.model.module.bbox_head.use_l1)
self.assertFalse(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
def test_not_model_wrapper_and_persistent_workers_off(self):
runner = Mock()
runner.model = Mock()
runner.model.bbox_head.use_l1 = False
runner.train_dataloader = Mock()
runner.train_dataloader.persistent_workers = False
runner.train_dataloader._DataLoader__initialized = True
runner.epoch = 284
runner.max_epochs = 300
hook = YOLOXModeSwitchHook(num_last_epochs=15)
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.model.bbox_head.use_l1)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
runner.epoch = 285
hook.before_train_epoch(runner)
self.assertFalse(hook._restart_dataloader)
self.assertTrue(runner.train_dataloader._DataLoader__initialized)
|
from llama_index.vector_stores.faiss.base import FaissVectorStore
from llama_index.vector_stores.faiss.map_store import FaissMapVectorStore
__all__ = ["FaissVectorStore", "FaissMapVectorStore"]
|
from llama_index.vector_stores.faiss.base import FaissVectorStore
__all__ = ["FaissVectorStore"]
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
DocumentBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
"DocumentBlock",
]
|
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
ImageBlock,
LLMMetadata,
MessageRole,
TextBlock,
AudioBlock,
)
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.mock import MockLLM
__all__ = [
"CustomLLM",
"LLM",
"ChatMessage",
"ChatResponse",
"ChatResponseAsyncGen",
"ChatResponseGen",
"CompletionResponse",
"CompletionResponseAsyncGen",
"CompletionResponseGen",
"LLMMetadata",
"MessageRole",
"MockLLM",
"ImageBlock",
"TextBlock",
"AudioBlock",
]
|
"""Configuration for unit tests."""
from collections.abc import Sequence
from importlib import util
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
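# Example invocations (illustrative, not part of the original file):
#   pytest --only-core      # skip every test carrying the `requires` marker
#   pytest --only-extended  # skip unmarked tests; fail outright if a `requires` package is missing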
|
"""Configuration for unit tests."""
from importlib import util
from typing import Dict, Sequence
import pytest
from pytest import Config, Function, Parser
def pytest_addoption(parser: Parser) -> None:
"""Add custom command line options to pytest."""
parser.addoption(
"--only-extended",
action="store_true",
help="Only run extended tests. Does not allow skipping any extended tests.",
)
parser.addoption(
"--only-core",
action="store_true",
help="Only run core tests. Never runs any extended tests.",
)
def pytest_collection_modifyitems(config: Config, items: Sequence[Function]) -> None:
"""Add implementations for handling custom markers.
At the moment, this adds support for a custom `requires` marker.
The `requires` marker is used to denote tests that require one or more packages
to be installed to run. If the package is not installed, the test is skipped.
The `requires` marker syntax is:
.. code-block:: python
@pytest.mark.requires("package1", "package2")
def test_something():
...
"""
# Mapping from the name of a package to whether it is installed or not.
# Used to avoid repeated calls to `util.find_spec`
required_pkgs_info: Dict[str, bool] = {}
only_extended = config.getoption("--only-extended") or False
only_core = config.getoption("--only-core") or False
if only_extended and only_core:
raise ValueError("Cannot specify both `--only-extended` and `--only-core`.")
for item in items:
requires_marker = item.get_closest_marker("requires")
if requires_marker is not None:
if only_core:
item.add_marker(pytest.mark.skip(reason="Skipping not a core test."))
continue
# Iterate through the list of required packages
required_pkgs = requires_marker.args
for pkg in required_pkgs:
# If we haven't yet checked whether the pkg is installed
# let's check it and store the result.
if pkg not in required_pkgs_info:
try:
installed = util.find_spec(pkg) is not None
except Exception:
installed = False
required_pkgs_info[pkg] = installed
if not required_pkgs_info[pkg]:
if only_extended:
pytest.fail(
f"Package `{pkg}` is not installed but is required for "
f"extended tests. Please install the given package and "
f"try again.",
)
else:
# If the package is not installed, we immediately break
# and mark the test as skipped.
item.add_marker(
pytest.mark.skip(reason=f"Requires pkg: `{pkg}`")
)
break
else:
if only_extended:
item.add_marker(
pytest.mark.skip(reason="Skipping not an extended test.")
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
def _create_dummy_results(self):
bboxes = np.array([[23.2172, 31.7541, 987.3413, 357.8443],
[100, 120, 130, 150], [150, 160, 190, 200],
[250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 0, 0])
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels))
def test_init(self):
# test invalid iou_thrs
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs={'a', 0.5}, ioa_thrs={'b', 0.5})
# test ioa and iou_thrs length not equal
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs=[0.5, 0.75], ioa_thrs=[0.5])
metric = OpenImagesMetric(iou_thrs=0.6)
self.assertEqual(metric.iou_thrs, [0.6])
def test_eval(self):
register_all_modules()
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/image-metas.pkl',
pipeline=[
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'instances'))
])
dataset.full_init()
data_sample = dataset[0]['data_samples'].to_dict()
data_sample['pred_instances'] = self._create_dummy_results()
metric = OpenImagesMetric()
metric.dataset_meta = dataset.metainfo
metric.process({}, [data_sample])
results = metric.evaluate(size=len(dataset))
targets = {'openimages/AP50': 1.0, 'openimages/mAP': 1.0}
self.assertDictEqual(results, targets)
# test multi-threshold
metric = OpenImagesMetric(iou_thrs=[0.1, 0.5], ioa_thrs=[0.1, 0.5])
metric.dataset_meta = dataset.metainfo
metric.process({}, [data_sample])
results = metric.evaluate(size=len(dataset))
targets = {
'openimages/AP10': 1.0,
'openimages/AP50': 1.0,
'openimages/mAP': 1.0
}
self.assertDictEqual(results, targets)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.datasets import OpenImagesDataset
from mmdet.evaluation import OpenImagesMetric
from mmdet.utils import register_all_modules
class TestOpenImagesMetric(unittest.TestCase):
def _create_dummy_results(self):
bboxes = np.array([[23.2172, 31.7541, 987.3413, 357.8443],
[100, 120, 130, 150], [150, 160, 190, 200],
[250, 260, 350, 360]])
scores = np.array([1.0, 0.98, 0.96, 0.95])
labels = np.array([0, 0, 0, 0])
return dict(
bboxes=torch.from_numpy(bboxes),
scores=torch.from_numpy(scores),
labels=torch.from_numpy(labels))
def test_init(self):
# test invalid iou_thrs
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs={'a', 0.5}, ioa_thrs={'b', 0.5})
# test ioa and iou_thrs length not equal
with self.assertRaises(AssertionError):
OpenImagesMetric(iou_thrs=[0.5, 0.75], ioa_thrs=[0.5])
metric = OpenImagesMetric(iou_thrs=0.6)
self.assertEqual(metric.iou_thrs, [0.6])
def test_eval(self):
register_all_modules()
dataset = OpenImagesDataset(
data_root='tests/data/OpenImages/',
ann_file='annotations/oidv6-train-annotations-bbox.csv',
data_prefix=dict(img='OpenImages/train/'),
label_file='annotations/class-descriptions-boxable.csv',
hierarchy_file='annotations/bbox_labels_600_hierarchy.json',
meta_file='annotations/image-metas.pkl',
pipeline=[
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'instances'))
])
dataset.full_init()
data_sample = dataset[0]['data_sample'].to_dict()
metric = OpenImagesMetric()
metric.dataset_meta = dataset.metainfo
metric.process(
data_batch=[dict(inputs=None, data_sample=data_sample)],
predictions=[dict(pred_instances=self._create_dummy_results())])
results = metric.evaluate(size=len(dataset))
targets = {'openimages/AP50': 1.0, 'openimages/mAP': 1.0}
self.assertDictEqual(results, targets)
# test multi-threshold
metric = OpenImagesMetric(iou_thrs=[0.1, 0.5], ioa_thrs=[0.1, 0.5])
metric.dataset_meta = dataset.metainfo
metric.process(
data_batch=[dict(inputs=None, data_sample=data_sample)],
predictions=[dict(pred_instances=self._create_dummy_results())])
results = metric.evaluate(size=len(dataset))
targets = {
'openimages/AP10': 1.0,
'openimages/AP50': 1.0,
'openimages/mAP': 1.0
}
self.assertDictEqual(results, targets)
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.38"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.37"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
This file runs Masked Language Modeling (MLM) training. You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False  # Set to True if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
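# Note (editorial): with cache_tokenization=True each sentence string is replaced
# in place by its tokenized encoding on first access, trading memory for skipping
# re-tokenization on later epochs -- presumably why it is enabled only for the
# (smaller) dev set below.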
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
"""
This file runs Masked Language Modeling (MLM) training. You provide a training file; each line is interpreted as a sentence / paragraph.
Optionally, you can also provide a dev file.
The fine-tuned model is stored in the output/model_name folder.
Usage:
python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]
"""
import gzip
import sys
from datetime import datetime
from transformers import (
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
Trainer,
TrainingArguments,
)
if len(sys.argv) < 3:
print("Usage: python train_mlm.py model_name data/train_sentences.txt [data/dev_sentences.txt]")
exit()
model_name = sys.argv[1]
per_device_train_batch_size = 64
save_steps = 1000 # Save model every 1k steps
num_train_epochs = 3 # Number of epochs
use_fp16 = False  # Set to True if your GPU supports FP16 operations
max_length = 100 # Max length for a text input
do_whole_word_mask = True # If set to true, whole words are masked
mlm_prob = 0.15 # Probability that a word is replaced by a [MASK] token
# Load the model
model = AutoModelForMaskedLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
output_dir = "output/{}-{}".format(model_name.replace("/", "_"), datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
print("Save checkpoints to:", output_dir)
##### Load our training datasets
train_sentences = []
train_path = sys.argv[2]
with gzip.open(train_path, "rt", encoding="utf8") if train_path.endswith(".gz") else open(
train_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
print("Train sentences:", len(train_sentences))
dev_sentences = []
if len(sys.argv) >= 4:
dev_path = sys.argv[3]
with gzip.open(dev_path, "rt", encoding="utf8") if dev_path.endswith(".gz") else open(
dev_path, "r", encoding="utf8"
) as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
dev_sentences.append(line)
print("Dev sentences:", len(dev_sentences))
# A dataset wrapper that tokenizes our data on the fly
class TokenizedSentencesDataset:
def __init__(self, sentences, tokenizer, max_length, cache_tokenization=False):
self.tokenizer = tokenizer
self.sentences = sentences
self.max_length = max_length
self.cache_tokenization = cache_tokenization
def __getitem__(self, item):
if not self.cache_tokenization:
return self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
if isinstance(self.sentences[item], str):
self.sentences[item] = self.tokenizer(
self.sentences[item],
add_special_tokens=True,
truncation=True,
max_length=self.max_length,
return_special_tokens_mask=True,
)
return self.sentences[item]
def __len__(self):
return len(self.sentences)
train_dataset = TokenizedSentencesDataset(train_sentences, tokenizer, max_length)
dev_dataset = (
TokenizedSentencesDataset(dev_sentences, tokenizer, max_length, cache_tokenization=True)
if len(dev_sentences) > 0
else None
)
##### Training arguments
if do_whole_word_mask:
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
else:
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=mlm_prob)
training_args = TrainingArguments(
output_dir=output_dir,
overwrite_output_dir=True,
num_train_epochs=num_train_epochs,
evaluation_strategy="steps" if dev_dataset is not None else "no",
per_device_train_batch_size=per_device_train_batch_size,
eval_steps=save_steps,
save_steps=save_steps,
logging_steps=save_steps,
save_total_limit=1,
prediction_loss_only=True,
fp16=use_fp16,
)
trainer = Trainer(
model=model, args=training_args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=dev_dataset
)
print("Save tokenizer to:", output_dir)
tokenizer.save_pretrained(output_dir)
trainer.train()
print("Save model to:", output_dir)
model.save_pretrained(output_dir)
print("Training done")
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.9.0'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
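# A minimal usage sketch (illustrative, not part of the original module):
#   parse_version_info('0.9.0')    -> (0, 9, 0)
#   parse_version_info('0.9.0rc1') -> (0, 9, 0, 'rc1')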
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '0.8.4'
def parse_version_info(version_str):
"""Parse the version information.
Args:
version_str (str): version string like '0.1.0'.
Returns:
tuple: version information containing the major, minor and micro version.
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize('hostname', ['localhost', 'executor.jina.ai'])
def test_host_unpacking(protocol, gateway_type, tls, hostname):
port = 1234
protocol = f'{protocol}s' if tls and protocol else protocol
scheme = f'{protocol}://' if protocol else ''
host = f'{scheme}{hostname}:{port}'
c = Client(host=host) if scheme else Client(host=host, tls=tls)
if gateway_type:
assert c.args.protocol == gateway_type
assert c.args.host == hostname
assert c.args.port == port
assert c.args.tls == tls
@pytest.mark.parametrize('protocol', ['https', 'grpcs', 'wss'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port_tls(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == (port if port else 443)
@pytest.mark.parametrize('protocol', ['http', 'grpc', 'ws'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == (port if port else 80)
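# Note (editorial): the defaults asserted above mirror standard URL conventions --
# 443 for the TLS schemes (https/grpcs/wss) and 80 for the plaintext ones
# (http/grpc/ws). The parentheses ensure the default port is actually compared
# rather than the assert trivially passing on a truthy literal.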
def test_delete_slash_host():
host = 'http://localhost/'
c = Client(host=host)
assert c.args.host == 'localhost'
def test_host_unpacking_basic():
protocol = 'http'
hostname = 'localhost'
host = f'{protocol}://{hostname}'
c = Client(host=host)
assert c.args.protocol == GatewayProtocolType.HTTP
assert c.args.host == hostname
def test_host_unpacking_duplicate():
with pytest.raises(ValueError):
Client(host='http://localhost:1234', port=1234)
def test_log_config_arg():
cli_args = ['--log-config', 'logging.custom.yml']
from jina.parsers import set_client_cli_parser
args = set_client_cli_parser().parse_args(cli_args)
assert args.log_config == 'logging.custom.yml'
|
import pytest
from jina import Client
from jina.enums import GatewayProtocolType
@pytest.mark.parametrize(
'protocol, gateway_type',
[
('http', GatewayProtocolType.HTTP),
('grpc', GatewayProtocolType.GRPC),
('ws', GatewayProtocolType.WEBSOCKET),
(None, None),
],
)
@pytest.mark.parametrize('tls', [True, False])
@pytest.mark.parametrize('hostname', ['localhost', 'executor.jina.ai'])
def test_host_unpacking(protocol, gateway_type, tls, hostname):
port = 1234
protocol = f'{protocol}s' if tls and protocol else protocol
scheme = f'{protocol}://' if protocol else ''
host = f'{scheme}{hostname}:{port}'
c = Client(host=host) if scheme else Client(host=host, tls=tls)
if gateway_type:
assert c.args.protocol == gateway_type
assert c.args.host == hostname
assert c.args.port == port
assert c.args.tls == tls
@pytest.mark.parametrize('protocol', ['https', 'grpcs', 'wss'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port_tls(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == (port if port else 443)
@pytest.mark.parametrize('protocol', ['http', 'grpc', 'ws'])
@pytest.mark.parametrize('port', [1234, None])
def test_host_unpacking_port(protocol, port):
port_scheme = f':{port}' if port else ''
host = f'{protocol}://localhost{port_scheme}'
c = Client(host=host)
assert c.args.port == (port if port else 80)
def test_delete_slash_host():
host = 'http://localhost/'
c = Client(host=host)
assert c.args.host == 'localhost'
def test_host_unpacking_basic():
protocol = 'http'
hostname = 'localhost'
host = f'{protocol}://{hostname}'
c = Client(host=host)
assert c.args.protocol == GatewayProtocolType.HTTP
assert c.args.host == hostname
def test_host_unpacking_duplicate():
with pytest.raises(ValueError):
Client(host='http://localhost:1234', port=1234)
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
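# Note (editorial): the try/except around `import torch` above is the usual
# optional-dependency pattern -- the torch-backed tensor types are imported and
# appended to __all__ only when torch is installed.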
|
from docarray.typing.id import ID
from docarray.typing.tensor.audio import AudioNdArray
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video import VideoNdArray
from docarray.typing.url import (
AnyUrl,
AudioUrl,
ImageUrl,
Mesh3DUrl,
PointCloud3DUrl,
TextUrl,
VideoUrl,
)
__all__ = [
'NdArray',
'AudioNdArray',
'VideoNdArray',
'AnyEmbedding',
'ImageUrl',
'AudioUrl',
'TextUrl',
'Mesh3DUrl',
'PointCloud3DUrl',
'VideoUrl',
'AnyUrl',
'ID',
'AnyTensor',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor import TorchEmbedding, TorchTensor # noqa: F401
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor # noqa
__all__.extend(
['AudioTorchTensor', 'TorchEmbedding', 'TorchTensor', 'VideoTorchTensor']
)
|