input (string, length 33–5k) | output (string, length 32–5k)
---|---|
"""Test Fireworks API wrapper.
In order to run this test, you need a Fireworks API key.
You can get one by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
"""
import pytest as pytest
from langchain_fireworks import Fireworks
_MODEL = "accounts/fireworks/models/llama-v3p1-8b-instruct"
def test_fireworks_call() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "fireworks"
assert isinstance(output, str)
assert len(output) > 0
async def test_fireworks_acall() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model=_MODEL,
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "fireworks"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model=_MODEL)
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model=_MODEL)
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
|
"""Test Fireworks API wrapper.
In order to run this test, you need a Fireworks API key.
You can get one by registering for free at https://api.fireworks.ai/.
A test key can be found at https://api.fireworks.ai/settings/api-keys
You'll then need to set the FIREWORKS_API_KEY environment variable to your API key.
"""
import pytest as pytest
from langchain_fireworks import Fireworks
def test_fireworks_call() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
output = llm.invoke("Say foo:")
assert llm._llm_type == "fireworks"
assert isinstance(output, str)
assert len(output) > 0
async def test_fireworks_acall() -> None:
"""Test simple call to fireworks."""
llm = Fireworks(
model="accounts/fireworks/models/mixtral-8x7b-instruct",
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "fireworks"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
def test_stream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
for token in llm.stream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_astream() -> None:
"""Test streaming tokens from OpenAI."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
async for token in llm.astream("I'm Pickle Rick"):
assert isinstance(token, str)
async def test_abatch() -> None:
"""Test streaming tokens from Fireworks."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
result = await llm.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_abatch_tags() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
result = await llm.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token, str)
def test_batch() -> None:
"""Test batch tokens from Fireworks."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
result = llm.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token, str)
async def test_ainvoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
result = await llm.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result, str)
def test_invoke() -> None:
"""Test invoke tokens from Fireworks."""
llm = Fireworks(model="accounts/fireworks/models/mixtral-8x7b-instruct")
result = llm.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result, str)
|
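Since the tests above call the live Fireworks API, a guard along these lines (an illustrative sketch, not part of the original suite; the marker name is made up here) can skip them when FIREWORKS_API_KEY is missing from the environment:

import os

import pytest
from langchain_fireworks import Fireworks

# Hypothetical guard: skip the live-API tests when no key is configured.
requires_fireworks = pytest.mark.skipif(
    not os.environ.get("FIREWORKS_API_KEY"),
    reason="FIREWORKS_API_KEY is not set; skipping Fireworks integration tests",
)


@requires_fireworks
def test_fireworks_smoke() -> None:
    """Minimal smoke test mirroring test_fireworks_call above."""
    llm = Fireworks(
        model="accounts/fireworks/models/llama-v3p1-8b-instruct",
        max_tokens=16,
    )
    assert isinstance(llm.invoke("Say foo:"), str)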
from typing import Optional, List
import httpx
from httpx import Timeout
from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
DEFAULT_REQUEST_TIMEOUT = 30.0
class LlamafileEmbedding(BaseEmbedding):
"""
Class for llamafile embeddings.
llamafile lets you distribute and run large language models with a
single file.
To get started, see: https://github.com/Mozilla-Ocho/llamafile
To use this class, you will need to first:
1. Download a llamafile.
2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
3. Start the llamafile in server mode with embeddings enabled:
`./path/to/model.llamafile --server --nobrowser --embedding`
"""
base_url: str = Field(
description="base url of the llamafile server", default="http://localhost:8080"
)
request_timeout: float = Field(
default=DEFAULT_REQUEST_TIMEOUT,
description="The timeout for making http request to llamafile API server",
)
def __init__(
self,
base_url: str = "http://localhost:8080",
callback_manager: Optional[CallbackManager] = None,
**kwargs,
) -> None:
super().__init__(
base_url=base_url,
callback_manager=callback_manager or CallbackManager([]),
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "LlamafileEmbedding"
def _get_query_embedding(self, query: str) -> Embedding:
return self._get_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> Embedding:
return await self._aget_text_embedding(query)
def _get_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text synchronously.
"""
request_body = {
"content": text,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return response.json()["embedding"]
async def _aget_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text asynchronously.
"""
request_body = {
"content": text,
}
async with httpx.AsyncClient(timeout=Timeout(self.request_timeout)) as client:
response = await client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return response.json()["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""
Embed the input texts synchronously.
"""
request_body = {
"content": texts,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return [output["embedding"] for output in response.json()["results"]]
    async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input texts asynchronously.
        """
request_body = {
"content": texts,
}
async with httpx.AsyncClient(timeout=Timeout(self.request_timeout)) as client:
response = await client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return [output["embedding"] for output in response.json()["results"]]
|
from typing import Optional, List
import httpx
from httpx import Timeout
from llama_index.core.base.embeddings.base import BaseEmbedding, Embedding
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
DEFAULT_REQUEST_TIMEOUT = 30.0
class LlamafileEmbedding(BaseEmbedding):
"""Class for llamafile embeddings.
llamafile lets you distribute and run large language models with a
single file.
To get started, see: https://github.com/Mozilla-Ocho/llamafile
To use this class, you will need to first:
1. Download a llamafile.
2. Make the downloaded file executable: `chmod +x path/to/model.llamafile`
3. Start the llamafile in server mode with embeddings enabled:
`./path/to/model.llamafile --server --nobrowser --embedding`
"""
base_url: str = Field(
description="base url of the llamafile server", default="http://localhost:8080"
)
request_timeout: float = Field(
default=DEFAULT_REQUEST_TIMEOUT,
description="The timeout for making http request to llamafile API server",
)
def __init__(
self,
base_url: str = "http://localhost:8080",
callback_manager: Optional[CallbackManager] = None,
**kwargs,
) -> None:
super().__init__(
base_url=base_url,
callback_manager=callback_manager or CallbackManager([]),
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "LlamafileEmbedding"
def _get_query_embedding(self, query: str) -> Embedding:
return self._get_text_embedding(query)
async def _aget_query_embedding(self, query: str) -> Embedding:
return await self._aget_text_embedding(query)
def _get_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text synchronously.
"""
request_body = {
"content": text,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return response.json()["embedding"]
async def _aget_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text asynchronously.
"""
request_body = {
"content": text,
}
async with httpx.AsyncClient(timeout=Timeout(self.request_timeout)) as client:
response = await client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return response.json()["embedding"]
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""
Embed the input texts synchronously.
"""
request_body = {
"content": texts,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return [output["embedding"] for output in response.json()["results"]]
    async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input texts asynchronously.
        """
request_body = {
"content": texts,
}
async with httpx.AsyncClient(timeout=Timeout(self.request_timeout)) as client:
response = await client.post(
url=f"{self.base_url}/embedding",
headers={"Content-Type": "application/json"},
json=request_body,
)
response.encoding = "utf-8"
response.raise_for_status()
return [output["embedding"] for output in response.json()["results"]]
|
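For reference, the HTTP exchange that LlamafileEmbedding wraps can be reproduced directly with httpx. This is a minimal sketch assuming a llamafile server is already running locally with --embedding enabled, as described in the class docstring:

import httpx

# Post to the llamafile /embedding endpoint the same way _get_text_embedding does.
# Assumes a server started with: ./path/to/model.llamafile --server --nobrowser --embedding
with httpx.Client(timeout=httpx.Timeout(30.0)) as client:
    response = client.post(
        "http://localhost:8080/embedding",
        headers={"Content-Type": "application/json"},
        json={"content": "llamafile lets you run an LLM from a single file"},
    )
    response.raise_for_status()
    embedding = response.json()["embedding"]
    print(len(embedding))  # dimensionality of the returned vector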
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
def disable_beta_transforms_warning():
# Noop, only exists to avoid breaking existing code.
# See https://github.com/pytorch/vision/issues/7896
pass
|
import os
import warnings
from modulefinder import Module
import torch
from torchvision import _meta_registrations, datasets, io, models, ops, transforms, utils
from .extension import _HAS_OPS
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Check if torchvision is being imported within the root folder
if not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == os.path.join(
os.path.realpath(os.getcwd()), "torchvision"
):
message = (
"You are importing torchvision within its own root folder ({}). "
"This is not expected to work and may give errors. Please exit the "
"torchvision project source and relaunch your python interpreter."
)
warnings.warn(message.format(os.getcwd()))
_image_backend = "PIL"
_video_backend = "pyav"
def set_image_backend(backend):
"""
Specifies the package used to load images.
Args:
backend (string): Name of the image backend. one of {'PIL', 'accimage'}.
The :mod:`accimage` package uses the Intel IPP library. It is
generally faster than PIL, but does not support as many operations.
"""
global _image_backend
if backend not in ["PIL", "accimage"]:
raise ValueError(f"Invalid backend '{backend}'. Options are 'PIL' and 'accimage'")
_image_backend = backend
def get_image_backend():
"""
Gets the name of the package used to load images
"""
return _image_backend
def set_video_backend(backend):
"""
Specifies the package used to decode videos.
Args:
backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.
The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic
binding for the FFmpeg libraries.
The :mod:`video_reader` package includes a native C++ implementation on
top of FFMPEG libraries, and a python API of TorchScript custom operator.
It generally decodes faster than :mod:`pyav`, but is perhaps less robust.
.. note::
Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'
backend, please compile torchvision from source.
"""
global _video_backend
if backend not in ["pyav", "video_reader", "cuda"]:
raise ValueError("Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'" % backend)
if backend == "video_reader" and not io._HAS_VIDEO_OPT:
# TODO: better messages
message = "video_reader video backend is not available. Please compile torchvision from source and try again"
raise RuntimeError(message)
elif backend == "cuda" and not io._HAS_GPU_VIDEO_DECODER:
# TODO: better messages
message = "cuda video backend is not available."
raise RuntimeError(message)
else:
_video_backend = backend
def get_video_backend():
"""
Returns the currently active video backend used to decode videos.
Returns:
str: Name of the video backend. one of {'pyav', 'video_reader'}.
"""
return _video_backend
def _is_tracing():
return torch._C._get_tracing_state()
|
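A short usage sketch for the backend setters defined above; the video_reader attempt is wrapped in try/except because, as the module's note says, that backend is only present when torchvision is built with FFMPEG support:

import torchvision

# Image backend: 'PIL' is the default; 'accimage' is only valid if it is installed.
torchvision.set_image_backend("PIL")
print(torchvision.get_image_backend())  # -> 'PIL'

# Video backend: prefer the native reader when available, else fall back to pyav.
try:
    torchvision.set_video_backend("video_reader")
except RuntimeError:
    torchvision.set_video_backend("pyav")
print(torchvision.get_video_backend())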
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = MagicMock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
class DummyLogger():
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner, 0)
        # assert that the parameters of conv2 and conv3 are not in the
        # computational graph whose root is x1.sum().
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner, 0)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from torch import nn
from mmengine.hooks import OptimizerHook
class TestOptimizerHook:
def test_after_train_iter(self):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv2 = nn.Conv2d(
in_channels=2,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
self.conv3 = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=3,
stride=1,
padding=1,
dilation=1)
def forward(self, x):
x1 = self.conv1(x)
x2 = self.conv2(x1)
return x1, x2
model = Model()
x = torch.rand(1, 1, 3, 3)
dummy_runner = Mock()
dummy_runner.optimizer.zero_grad = Mock(return_value=None)
dummy_runner.optimizer.step = Mock(return_value=None)
dummy_runner.model = model
dummy_runner.outputs = dict()
dummy_runner.outputs['num_samples'] = 0
class DummyLogger():
def __init__(self):
self.msg = ''
def log(self, msg=None, **kwargs):
self.msg += msg
dummy_runner.logger = DummyLogger()
optimizer_hook = OptimizerHook(
dict(max_norm=2), detect_anomalous_params=True)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
optimizer_hook.after_train_iter(dummy_runner, 0)
        # assert that the parameters of conv2 and conv3 are not in the
        # computational graph whose root is x1.sum().
assert 'conv2.weight' in dummy_runner.logger.msg
assert 'conv2.bias' in dummy_runner.logger.msg
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_called()
optimizer_hook.detect_anomalous_parameters.assert_called()
dummy_runner.outputs['loss'] = model(x)[1].sum()
dummy_runner.logger.msg = ''
optimizer_hook.after_train_iter(dummy_runner, 0)
# assert the parameters of conv3 are not in the computational graph
assert 'conv3.weight' in dummy_runner.logger.msg
assert 'conv3.bias' in dummy_runner.logger.msg
assert 'conv2.weight' not in dummy_runner.logger.msg
assert 'conv2.bias' not in dummy_runner.logger.msg
assert 'conv1.weight' not in dummy_runner.logger.msg
assert 'conv1.bias' not in dummy_runner.logger.msg
# grad_clip is None and detect_anomalous_parameters is False
optimizer_hook = OptimizerHook(detect_anomalous_params=False)
optimizer_hook.detect_anomalous_parameters = Mock(
wraps=optimizer_hook.detect_anomalous_parameters)
optimizer_hook.clip_grads = Mock(wraps=optimizer_hook.clip_grads)
dummy_runner.outputs['loss'] = model(x)[0].sum()
dummy_runner.outputs['loss'].backward = Mock(
wraps=dummy_runner.outputs['loss'].backward)
optimizer_hook.after_train_iter(dummy_runner, 0)
dummy_runner.optimizer.step.assert_called()
dummy_runner.outputs['loss'].backward.assert_called()
optimizer_hook.clip_grads.assert_not_called()
optimizer_hook.detect_anomalous_parameters.assert_not_called()
|
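The behaviour the test exercises, reporting parameters that never reach the loss, can be sketched without mmengine by walking the autograd graph. This is an illustrative approximation of the idea, not the OptimizerHook implementation:

import torch
from torch import nn


def params_outside_graph(loss: torch.Tensor, model: nn.Module) -> list:
    """Return names of parameters that do not appear in the graph rooted at `loss`."""
    reached, seen, stack = set(), set(), [loss.grad_fn]
    while stack:
        fn = stack.pop()
        if fn is None or id(fn) in seen:
            continue
        seen.add(id(fn))
        # AccumulateGrad nodes keep a reference to the leaf tensor they update.
        if hasattr(fn, "variable"):
            reached.add(id(fn.variable))
        stack.extend(next_fn for next_fn, _ in fn.next_functions)
    return [name for name, p in model.named_parameters() if id(p) not in reached]


model = nn.Sequential(nn.Conv2d(1, 2, 3, padding=1), nn.Conv2d(2, 2, 3, padding=1))
x = torch.rand(1, 1, 3, 3)
loss = model[0](x).sum()  # only the first conv participates in this loss
print(params_outside_graph(loss, model))  # ['1.weight', '1.bias']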
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation", "MLMTransformer", "SpladePooling"]
|
from __future__ import annotations
from .CSRSparsity import CSRSparsity
from .MLMTransformer import MLMTransformer
from .SpladePooling import SpladePooling
from .TopKActivation import TopKActivation
__all__ = ["CSRSparsity", "TopKActivation", "MLMTransformer", "SpladePooling"]
# TODO : Add in models the possibility to have the MLM head(for splade)
|
import os
import pytest as pytest
from jina import Flow, DocumentArray, Document
from ..redis_storage import RedisStorage
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_flow(docs, docker_compose):
f = Flow().add(uses=RedisStorage)
with f:
f.post(on='/index', inputs=docs)
resp = f.post(on='/search', inputs=DocumentArray([Document(id=doc.id) for doc in docs]), return_results=True)
assert len(resp[0].docs) == len(docs)
assert all(doc_a.id == doc_b.id for doc_a, doc_b in zip(resp[0].docs, docs))
|
import os
import pytest as pytest
from jina import Flow, DocumentArray, Document
from .. import RedisStorage
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_flow(docs, docker_compose):
f = Flow().add(uses=RedisStorage)
with f:
f.post(on='/index', inputs=docs)
resp = f.post(on='/search', inputs=DocumentArray([Document(id=doc.id) for doc in docs]), return_results=True)
assert len(resp[0].docs) == len(docs)
assert all(doc_a.id == doc_b.id for doc_a, doc_b in zip(resp[0].docs, docs))
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (string): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class SUN397(VisionDataset):
"""`The SUN397 Data Set <https://vision.princeton.edu/projects/2010/SUN/>`_.
The SUN397 or Scene UNderstanding (SUN) is a dataset for scene recognition consisting of
397 categories with 108'754 images.
Args:
root (string): Root directory of the dataset.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_DATASET_URL = "http://vision.princeton.edu/projects/2010/SUN/SUN397.tar.gz"
_DATASET_MD5 = "8ca2778205c41d23104230ba66911c7a"
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUN397"
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
with open(self._data_dir / "ClassName.txt") as f:
self.classes = [c[3:].strip() for c in f]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
self._image_files = list(self._data_dir.rglob("sun_*.jpg"))
self._labels = [
self.class_to_idx["/".join(path.relative_to(self._data_dir).parts[1:-1])] for path in self._image_files
]
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def _check_exists(self) -> bool:
return self._data_dir.is_dir()
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._DATASET_URL, download_root=self.root, md5=self._DATASET_MD5)
|
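A hedged usage sketch for the dataset class above, assuming enough disk space for the downloaded archive; the transform values are illustrative:

from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import SUN397

# Download (if needed) and load SUN397 with a basic transform pipeline.
dataset = SUN397(
    root="./data",
    transform=transforms.Compose(
        [transforms.Resize((224, 224)), transforms.ToTensor()]
    ),
    download=True,
)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

image, label = dataset[0]
print(image.shape, dataset.classes[label])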
"""
=========================
Caching nearest neighbors
=========================
This example demonstrates how to precompute the k nearest neighbors before
using them in KNeighborsClassifier. KNeighborsClassifier can compute the
nearest neighbors internally, but precomputing them can have several benefits,
such as finer parameter control, caching for multiple use, or custom
implementations.
Here we use the caching property of pipelines to cache the nearest neighbors
graph between multiple fits of KNeighborsClassifier. The first call is slow
since it computes the neighbors graph, while subsequent calls are faster as they
do not need to recompute the graph. Here the durations are small since the
dataset is small, but the gain can be more substantial when the dataset grows
larger, or when the grid of parameters to search is large.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from tempfile import TemporaryDirectory
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer
from sklearn.pipeline import Pipeline
X, y = load_digits(return_X_y=True)
n_neighbors_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# The transformer computes the nearest neighbors graph using the maximum number
# of neighbors necessary in the grid search. The classifier model filters the
# nearest neighbors graph as required by its own n_neighbors parameter.
graph_model = KNeighborsTransformer(n_neighbors=max(n_neighbors_list), mode="distance")
classifier_model = KNeighborsClassifier(metric="precomputed")
# Note that we give `memory` a directory to cache the graph computation
# that will be used several times when tuning the hyperparameters of the
# classifier.
with TemporaryDirectory(prefix="sklearn_graph_cache_") as tmpdir:
full_model = Pipeline(
steps=[("graph", graph_model), ("classifier", classifier_model)], memory=tmpdir
)
param_grid = {"classifier__n_neighbors": n_neighbors_list}
grid_model = GridSearchCV(full_model, param_grid)
grid_model.fit(X, y)
# Plot the results of the grid search.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_test_score"],
yerr=grid_model.cv_results_["std_test_score"],
)
axes[0].set(xlabel="n_neighbors", title="Classification accuracy")
axes[1].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_fit_time"],
yerr=grid_model.cv_results_["std_fit_time"],
color="r",
)
axes[1].set(xlabel="n_neighbors", title="Fit time (with caching)")
fig.tight_layout()
plt.show()
|
"""
=========================
Caching nearest neighbors
=========================
This example demonstrates how to precompute the k nearest neighbors before
using them in KNeighborsClassifier. KNeighborsClassifier can compute the
nearest neighbors internally, but precomputing them can have several benefits,
such as finer parameter control, caching for multiple use, or custom
implementations.
Here we use the caching property of pipelines to cache the nearest neighbors
graph between multiple fits of KNeighborsClassifier. The first call is slow
since it computes the neighbors graph, while subsequent calls are faster as they
do not need to recompute the graph. Here the durations are small since the
dataset is small, but the gain can be more substantial when the dataset grows
larger, or when the grid of parameters to search is large.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from tempfile import TemporaryDirectory
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer
from sklearn.pipeline import Pipeline
X, y = load_digits(return_X_y=True)
n_neighbors_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# The transformer computes the nearest neighbors graph using the maximum number
# of neighbors necessary in the grid search. The classifier model filters the
# nearest neighbors graph as required by its own n_neighbors parameter.
graph_model = KNeighborsTransformer(n_neighbors=max(n_neighbors_list), mode="distance")
classifier_model = KNeighborsClassifier(metric="precomputed")
# Note that we give `memory` a directory to cache the graph computation
# that will be used several times when tuning the hyperparameters of the
# classifier.
with TemporaryDirectory(prefix="sklearn_graph_cache_") as tmpdir:
full_model = Pipeline(
steps=[("graph", graph_model), ("classifier", classifier_model)], memory=tmpdir
)
param_grid = {"classifier__n_neighbors": n_neighbors_list}
grid_model = GridSearchCV(full_model, param_grid)
grid_model.fit(X, y)
# Plot the results of the grid search.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
axes[0].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_test_score"],
yerr=grid_model.cv_results_["std_test_score"],
)
axes[0].set(xlabel="n_neighbors", title="Classification accuracy")
axes[1].errorbar(
x=n_neighbors_list,
y=grid_model.cv_results_["mean_fit_time"],
yerr=grid_model.cv_results_["std_fit_time"],
color="r",
)
axes[1].set(xlabel="n_neighbors", title="Fit time (with caching)")
fig.tight_layout()
plt.show()
|
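The example keeps its cache in a TemporaryDirectory, so it vanishes when the block exits. As a variation (an assumption about usage, not part of the scikit-learn example), `memory` can point at a persistent directory so the precomputed neighbors graph is reused across runs:

from sklearn.datasets import load_digits
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer
from sklearn.pipeline import Pipeline

X, y = load_digits(return_X_y=True)
n_neighbors_list = [1, 3, 5, 7, 9]

# A persistent on-disk cache: joblib creates the directory if it is missing.
cached_model = Pipeline(
    steps=[
        ("graph", KNeighborsTransformer(n_neighbors=max(n_neighbors_list), mode="distance")),
        ("classifier", KNeighborsClassifier(metric="precomputed")),
    ],
    memory="./sklearn_graph_cache",
)
search = GridSearchCV(cached_model, {"classifier__n_neighbors": n_neighbors_list})
search.fit(X, y)
print(search.best_params_)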
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
backbone=dict(
frozen_stages=-1,
zero_init_residual=False,
norm_cfg=norm_cfg,
init_cfg=None),
neck=dict(norm_cfg=norm_cfg),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.))
max_epochs = 73
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[65, 71],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# only keep latest 3 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=3))
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_270k_coco.py'
# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
|
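The step values in `lr_config` follow directly from the fractions named in the comment; a quick check against the 90k max_iters above:

max_iters = 90000
steps = [int(max_iters * f) for f in (0.9, 0.95, 0.975)]
print(steps)  # [81000, 85500, 87750], matching lr_config['step']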
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(SingleAgentRunnerMixin, BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
# last_chat_response will be used later, after the loop.
# We initialize it so it's valid even when 'response' is empty
last_chat_response = ChatResponse(message=ChatMessage())
async for last_chat_response in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
ctx.write_event_to_stream(
AgentStream(
delta=last_chat_response.delta or "",
response=last_chat_response.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(last_chat_response.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
return AgentOutput(
response=last_chat_response.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""
Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
await memory.aput_messages(scratchpad)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
|
from typing import List, Sequence
from llama_index.core.agent.workflow.base_agent import BaseWorkflowAgent
from llama_index.core.agent.workflow.single_agent_workflow import SingleAgentRunnerMixin
from llama_index.core.agent.workflow.workflow_events import (
AgentInput,
AgentOutput,
AgentStream,
ToolCallResult,
)
from llama_index.core.base.llms.types import ChatResponse
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import ChatMessage
from llama_index.core.memory import BaseMemory
from llama_index.core.tools import AsyncBaseTool
from llama_index.core.workflow import Context
class FunctionAgent(SingleAgentRunnerMixin, BaseWorkflowAgent):
"""Function calling agent implementation."""
scratchpad_key: str = "scratchpad"
async def take_step(
self,
ctx: Context,
llm_input: List[ChatMessage],
tools: Sequence[AsyncBaseTool],
memory: BaseMemory,
) -> AgentOutput:
"""Take a single step with the function calling agent."""
if not self.llm.metadata.is_function_calling_model:
raise ValueError("LLM must be a FunctionCallingLLM")
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
current_llm_input = [*llm_input, *scratchpad]
ctx.write_event_to_stream(
AgentInput(input=current_llm_input, current_agent_name=self.name)
)
response = await self.llm.astream_chat_with_tools( # type: ignore
tools, chat_history=current_llm_input, allow_parallel_tool_calls=True
)
# last_chat_response will be used later, after the loop.
# We initialize it so it's valid even when 'response' is empty
last_chat_response = ChatResponse(message=ChatMessage())
async for last_chat_response in response:
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
ctx.write_event_to_stream(
AgentStream(
delta=last_chat_response.delta or "",
response=last_chat_response.message.content or "",
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
)
tool_calls = self.llm.get_tool_calls_from_response( # type: ignore
last_chat_response, error_on_no_tool_call=False
)
# only add to scratchpad if we didn't select the handoff tool
scratchpad.append(last_chat_response.message)
await ctx.set(self.scratchpad_key, scratchpad)
raw = (
last_chat_response.raw.model_dump()
if isinstance(last_chat_response.raw, BaseModel)
else last_chat_response.raw
)
return AgentOutput(
response=last_chat_response.message,
tool_calls=tool_calls or [],
raw=raw,
current_agent_name=self.name,
)
async def handle_tool_call_results(
self, ctx: Context, results: List[ToolCallResult], memory: BaseMemory
) -> None:
"""Handle tool call results for function calling agent."""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
for tool_call_result in results:
scratchpad.append(
ChatMessage(
role="tool",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
if (
tool_call_result.return_direct
and tool_call_result.tool_name != "handoff"
):
scratchpad.append(
ChatMessage(
role="assistant",
content=str(tool_call_result.tool_output.content),
additional_kwargs={"tool_call_id": tool_call_result.tool_id},
)
)
break
await ctx.set(self.scratchpad_key, scratchpad)
async def finalize(
self, ctx: Context, output: AgentOutput, memory: BaseMemory
) -> AgentOutput:
"""Finalize the function calling agent.
Adds all in-progress messages to memory.
"""
scratchpad: List[ChatMessage] = await ctx.get(self.scratchpad_key, default=[])
await memory.aput_messages(scratchpad)
# reset scratchpad
await ctx.set(self.scratchpad_key, [])
return output
|
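For orientation, a hedged usage sketch of FunctionAgent with a single tool. It assumes the usual single-agent entry point (`agent.run`) and the separately installed OpenAI LLM integration; constructor details may differ between llama_index versions:

import asyncio

from llama_index.core.agent.workflow import FunctionAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI  # requires llama-index-llms-openai


def multiply(a: float, b: float) -> float:
    """Multiply two numbers."""
    return a * b


agent = FunctionAgent(
    tools=[FunctionTool.from_defaults(multiply)],
    llm=OpenAI(model="gpt-4o-mini"),
    system_prompt="You are a helpful assistant that can do arithmetic.",
)


async def main() -> None:
    response = await agent.run("What is 3 * 7?")
    print(str(response))


asyncio.run(main())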
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
"""
self.t = time.time()
def before_iter(self, runner, data_batch: DATA_BATCH = None) -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'data_time': time.time() - self.t})
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs:
Optional[Union[dict, Sequence[BaseDataSample]]] = None) \
-> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'time': time.time() - self.t})
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
    E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def before_epoch(self, runner) -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
"""
self.t = time.time()
def before_iter(self, runner, data_batch: DATA_BATCH = None) -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'data_time': time.time() - self.t})
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
# TODO: update for new logging system
runner.log_buffer.update({'time': time.time() - self.t})
self.t = time.time()
|
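The hook's bookkeeping reduces to one timestamp carried across the loop; a framework-free sketch of the same pattern (delays are illustrative):

import time


def fake_loader(n=3, load_delay=0.05):
    # Stand-in dataloader that spends time producing each batch.
    for i in range(n):
        time.sleep(load_delay)
        yield i


t = time.time()
for batch in fake_loader():
    data_time = time.time() - t      # waiting on the dataloader (before_iter)
    time.sleep(0.1)                  # stand-in for the model train step
    iter_time = time.time() - t      # full iteration, data + compute (after_iter)
    print(f"data_time={data_time:.3f}s time={iter_time:.3f}s")
    t = time.time()                  # reset the flag, as after_iter does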
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
# Follow the official implementation,
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar
# we should use the legacy coordinate system in mmdet 1.x,
                # which means w, h should be computed as `x2 - x1 + 1` and
# `y2 - y1 + 1`
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger,
use_legacy_coordinate=True)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes,
results,
proposal_nums,
iou_thrs,
logger=logger,
use_legacy_coordinate=True)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from mmcv.utils import print_log
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
if metric == 'mAP':
assert isinstance(iou_thrs, list)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_aps = []
for iou_thr in iou_thrs:
print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger)
mean_aps.append(mean_ap)
eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou_thr in enumerate(iou_thrs):
eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
|
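The `use_legacy_coordinate=True` argument in the first version above reflects the VOC convention that a box's width and height include both endpoints; a worked example with illustrative pixel values:

# Illustrative box corners (x1, y1, x2, y2) in pixels.
x1, y1, x2, y2 = 10, 20, 30, 50

w_legacy, h_legacy = x2 - x1 + 1, y2 - y1 + 1  # VOC / mmdet 1.x convention -> (21, 31)
w_modern, h_modern = x2 - x1, y2 - y1          # current convention -> (20, 30)
print((w_legacy, h_legacy), (w_modern, h_modern))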
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_client.http.exceptions import UnexpectedResponse
from qdrant_client.http.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
return PointStruct(
id=self._map_id(doc.id),
payload=dict(_serialized=doc.to_base64(**self.serialization_config)),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
        except UnexpectedResponse as response_error:
            if response_error.status_code in [404, 400]:
                raise KeyError(_id)
            # re-raise anything other than a missing-point response so errors
            # are not silently swallowed
            raise
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
|
from abc import abstractmethod
from typing import Iterable, Iterator
from qdrant_client import QdrantClient
from qdrant_openapi_client.exceptions import UnexpectedResponse
from qdrant_openapi_client.models.models import (
PointIdsList,
PointsList,
ScrollRequest,
PointStruct,
)
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
@property
@abstractmethod
def client(self) -> QdrantClient:
raise NotImplementedError()
@property
@abstractmethod
def serialization_config(self) -> dict:
raise NotImplementedError()
@property
@abstractmethod
def n_dim(self) -> int:
raise NotImplementedError()
@property
@abstractmethod
def collection_name(self) -> str:
raise NotImplementedError()
@property
@abstractmethod
def scroll_batch_size(self) -> int:
raise NotImplementedError()
def _upload_batch(self, docs: Iterable['Document']):
batch = []
for doc in docs:
batch.append(self._document_to_qdrant(doc))
if len(batch) > self.scroll_batch_size:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
batch = []
if len(batch) > 0:
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(points=batch),
)
def _qdrant_to_document(self, qdrant_record: dict) -> 'Document':
return Document.from_base64(
qdrant_record['_serialized'].value[0], **self.serialization_config
)
def _document_to_qdrant(self, doc: 'Document') -> 'PointStruct':
return PointStruct(
id=self._map_id(doc.id),
payload=dict(_serialized=doc.to_base64(**self.serialization_config)),
vector=self._map_embedding(doc.embedding),
)
def _get_doc_by_id(self, _id: str) -> 'Document':
try:
resp = self.client.http.points_api.get_point(
collection_name=self.collection_name, id=self._map_id(_id)
)
return self._qdrant_to_document(resp.result.payload)
        except UnexpectedResponse as response_error:
            if response_error.status_code in [404, 400]:
                raise KeyError(_id)
            # re-raise anything other than a missing-point response so errors
            # are not silently swallowed
            raise
def _del_doc_by_id(self, _id: str):
self.client.http.points_api.delete_points(
collection_name=self.collection_name,
wait=True,
points_selector=PointIdsList(points=[self._map_id(_id)]),
)
def _set_doc_by_id(self, _id: str, value: 'Document'):
if _id != value.id:
self._del_doc_by_id(_id)
self.client.http.points_api.upsert_points(
collection_name=self.collection_name,
wait=True,
point_insert_operations=PointsList(
points=[self._document_to_qdrant(value)]
),
)
def scan(self) -> Iterator['Document']:
offset = None
while True:
response = self.client.http.points_api.scroll_points(
collection_name=self.collection_name,
scroll_request=ScrollRequest(
offset=offset,
limit=self.scroll_batch_size,
with_payload=['_serialized'],
with_vector=False,
),
)
for point in response.result.points:
yield self._qdrant_to_document(point.payload)
if response.result.next_page_offset:
offset = response.result.next_page_offset
else:
break
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.recreate_collection(
self.collection_name,
vector_size=self.n_dim,
distance=self.distance,
)
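    # Note on the storage layout implemented above: each Document is serialized
    # to base64 and stored in the point payload under the '_serialized' key,
    # while its embedding becomes the point vector. scan() therefore requests
    # only the '_serialized' payload field and skips vectors when rebuilding
    # Documents.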
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseAnglELoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.sparse_encoder.losses.SparseCoSENTLoss import SparseCoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseAnglELoss(SparseCoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0) -> None:
"""
This class implements AnglE (Angle Optimized).
This is a modification of :class:`SparseCoSENTLoss`, designed to address the following issue:
The cosine function's gradient approaches 0 as the wave approaches the top or bottom of its form.
This can hinder the optimization process, so AnglE proposes to instead optimize the angle difference
in complex space in order to mitigate this effect.
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition. This is the same as CoSENTLoss, with a different
similarity function.
Args:
model: SparseEncoder
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://arxiv.org/abs/2309.12871v1
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` is AnglELoss with ``pairwise_cos_sim`` as the metric, rather than ``pairwise_angle_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than ``SparseCoSENTLoss`` or ``SparseAnglELoss``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseAnglELoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale, similarity_fct=util.pairwise_angle_sim)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .ddod_head import DDODHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'DDODHead', 'Mask2FormerHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .autoassign_head import AutoAssignHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',
    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead',
'AutoAssignHead', 'DETRHead', 'YOLOFHead', 'DeformableDETRHead',
'SOLOHead', 'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',
'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',
'Mask2FormerHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.registry import TASK_UTILS
from .random_sampler import RandomSampler
@TASK_UTILS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
"""Instance balanced sampler that samples equal number of positive samples
for each instance."""
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): The assigned results of boxes.
num_expected (int): The number of expected positive samples
Returns:
Tensor or ndarray: sampled indices.
"""
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = torch.nonzero(
assign_result.gt_inds == i.item(), as_tuple=False)
if inds.numel() != 0:
inds = inds.squeeze(1)
else:
continue
if len(inds) > num_per_gt:
inds = self.random_choice(inds, num_per_gt)
sampled_inds.append(inds)
sampled_inds = torch.cat(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(
list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
extra_inds = torch.from_numpy(extra_inds).to(
assign_result.gt_inds.device).long()
sampled_inds = torch.cat([sampled_inds, extra_inds])
elif len(sampled_inds) > num_expected:
sampled_inds = self.random_choice(sampled_inds, num_expected)
return sampled_inds
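    # Worked example (hypothetical numbers): with num_expected=8 and 3 ground
    # truth instances, num_per_gt = round(8 / 3) + 1 = 4, so at most 4 positives
    # are drawn per instance; a shortfall is topped up from the remaining
    # positives, and an excess is reduced by random subsampling back to 8.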
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import Requests
from backend.util.type import MediaFileType
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: MediaFileType = SchemaField(
description="Image to analyze for deepfakes",
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
)
image: MediaFileType = SchemaField(
description="Processed image with detection markings (if return_image=True)",
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = Requests().post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
from backend.blocks.nvidia._auth import (
NvidiaCredentials,
NvidiaCredentialsField,
NvidiaCredentialsInput,
)
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.request import requests
from backend.util.type import MediaFileType
class NvidiaDeepfakeDetectBlock(Block):
class Input(BlockSchema):
credentials: NvidiaCredentialsInput = NvidiaCredentialsField()
image_base64: MediaFileType = SchemaField(
description="Image to analyze for deepfakes",
)
return_image: bool = SchemaField(
description="Whether to return the processed image with markings",
default=False,
)
class Output(BlockSchema):
status: str = SchemaField(
description="Detection status (SUCCESS, ERROR, CONTENT_FILTERED)",
)
image: MediaFileType = SchemaField(
description="Processed image with detection markings (if return_image=True)",
)
is_deepfake: float = SchemaField(
description="Probability that the image is a deepfake (0-1)",
)
def __init__(self):
super().__init__(
id="8c7d0d67-e79c-44f6-92a1-c2600c8aac7f",
description="Detects potential deepfakes in images using Nvidia's AI API",
categories={BlockCategory.SAFETY},
input_schema=NvidiaDeepfakeDetectBlock.Input,
output_schema=NvidiaDeepfakeDetectBlock.Output,
)
def run(
self, input_data: Input, *, credentials: NvidiaCredentials, **kwargs
) -> BlockOutput:
url = "https://ai.api.nvidia.com/v1/cv/hive/deepfake-image-detection"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": f"Bearer {credentials.api_key.get_secret_value()}",
}
image_data = f"data:image/jpeg;base64,{input_data.image_base64}"
payload = {
"input": [image_data],
"return_image": input_data.return_image,
}
try:
response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()
data = response.json()
result = data.get("data", [{}])[0]
# Get deepfake probability from first bounding box if any
deepfake_prob = 0.0
if result.get("bounding_boxes"):
deepfake_prob = result["bounding_boxes"][0].get("is_deepfake", 0.0)
yield "status", result.get("status", "ERROR")
yield "is_deepfake", deepfake_prob
if input_data.return_image:
image_data = result.get("image", "")
output_data = f"data:image/jpeg;base64,{image_data}"
yield "image", output_data
else:
yield "image", ""
except Exception as e:
yield "error", str(e)
yield "status", "ERROR"
yield "is_deepfake", 0.0
yield "image", ""
|
_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[20, 23],
gamma=0.1)
]
|
_base_ = './mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
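# Note: this legacy lr_config/runner pair is the mmdet 2.x-style counterpart of
# the param_scheduler variant above: MultiStepLR milestones at epochs 20 and 23
# over 24 epochs correspond to step=[20, 23] with an EpochBasedRunner (warmup is
# presumably inherited from the _base_ config here).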
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING: # pragma: no cover
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMixin):
def __init__(
self,
path: Union[str, Path],
protocol: str = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
):
self._show_progress = show_progress
self._filename = path
self._protocol, self._compress = protocol_and_compress_from_file_path(
path, protocol, compress
)
with open(path, 'rb') as f:
version_numdocs_lendoc0 = f.read(9)
# 8 bytes (uint64)
self._len = int.from_bytes(
version_numdocs_lendoc0[1:9], 'big', signed=False
)
self._iter = iter(self)
def __iter__(self):
from docarray import Document
from docarray.array.mixins.io.pbar import get_progressbar
from rich import filesize
with open(self._filename, 'rb') as f:
f.read(9)
pbar, t = get_progressbar(
'Deserializing', disable=not self._show_progress, total=self._len
)
with pbar:
_total_size = 0
pbar.start_task(t)
for _ in range(self._len):
# 4 bytes (uint32)
len_current_doc_in_bytes = int.from_bytes(
f.read(4), 'big', signed=False
)
_total_size += len_current_doc_in_bytes
yield Document.from_bytes(
f.read(len_current_doc_in_bytes),
protocol=self._protocol,
compress=self._compress,
)
pbar.update(
t, advance=1, total_size=str(filesize.decimal(_total_size))
)
def __len__(self):
return self._len
def __getitem__(self, item: list) -> 'DocumentArray':
from docarray import DocumentArray
da = DocumentArray()
for _ in item:
da.append(next(self._iter))
return da
|
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from docarray.array.mixins import ParallelMixin, GroupMixin
from docarray.helper import protocol_and_compress_from_file_path
if TYPE_CHECKING:
from docarray import Document, DocumentArray
class DocumentArrayLoader(ParallelMixin, GroupMixin):
def __init__(
self,
path: Union[str, Path],
protocol: str = 'protobuf',
compress: Optional[str] = None,
show_progress: bool = False,
):
self._show_progress = show_progress
self._filename = path
self._protocol, self._compress = protocol_and_compress_from_file_path(
path, protocol, compress
)
with open(path, 'rb') as f:
version_numdocs_lendoc0 = f.read(9)
# 8 bytes (uint64)
self._len = int.from_bytes(
version_numdocs_lendoc0[1:9], 'big', signed=False
)
self._iter = iter(self)
def __iter__(self):
from docarray import Document
from docarray.array.mixins.io.pbar import get_progressbar
from rich import filesize
with open(self._filename, 'rb') as f:
f.read(9)
pbar, t = get_progressbar(
'Deserializing', disable=not self._show_progress, total=self._len
)
with pbar:
_total_size = 0
pbar.start_task(t)
for _ in range(self._len):
# 4 bytes (uint32)
len_current_doc_in_bytes = int.from_bytes(
f.read(4), 'big', signed=False
)
_total_size += len_current_doc_in_bytes
yield Document.from_bytes(
f.read(len_current_doc_in_bytes),
protocol=self._protocol,
compress=self._compress,
)
pbar.update(
t, advance=1, total_size=str(filesize.decimal(_total_size))
)
def __len__(self):
return self._len
def __getitem__(self, item: list) -> 'DocumentArray':
from docarray import DocumentArray
da = DocumentArray()
for _ in item:
da.append(next(self._iter))
return da
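    # Binary layout read above: a 9-byte header whose leading byte appears to
    # carry a format version and whose bytes 1-8 hold the document count as a
    # big-endian uint64, followed by one record per document consisting of a
    # 4-byte big-endian uint32 length prefix and the serialized Document bytes.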
|
"""Psychic reader."""
import logging
import os
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class PsychicReader(BaseReader):
"""
Psychic reader.
Psychic is a platform that allows syncing data from many SaaS apps through one
universal API.
This reader connects to an instance of Psychic and reads data from it, given a
connector ID, account ID, and API key.
Learn more at docs.psychic.dev.
Args:
psychic_key (str): Secret key for Psychic.
Get one at https://dashboard.psychic.dev/api-keys.
"""
def __init__(self, psychic_key: Optional[str] = None) -> None:
"""Initialize with parameters."""
try:
from psychicapi import ConnectorId, Psychic
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
if psychic_key is None:
            psychic_key = os.environ.get("PSYCHIC_SECRET_KEY")
if psychic_key is None:
raise ValueError(
"Must specify `psychic_key` or set environment "
"variable `PSYCHIC_SECRET_KEY`."
)
self.psychic = Psychic(secret_key=psychic_key)
self.ConnectorId = ConnectorId
def load_data(
self, connector_id: Optional[str] = None, account_id: Optional[str] = None
) -> List[Document]:
"""
Load data from a Psychic connection.
Args:
connector_id (str): The connector ID to connect to
account_id (str): The account ID to connect to
Returns:
List[Document]: List of documents.
"""
if not connector_id or not account_id:
raise ValueError("Must specify both `connector_id` and `account_id`.")
if connector_id not in self.ConnectorId.__members__:
raise ValueError("Invalid connector ID.")
# get all the documents in the database
docs = []
data = self.psychic.get_documents(self.ConnectorId[connector_id], account_id)
for resource in data:
text = resource.get("content")
doc_id = resource.get("uri")
docs.append(
Document(
text=text,
id_=doc_id,
metadata={"connector_id": connector_id, "account_id": account_id},
)
)
return docs
if __name__ == "__main__":
reader = PsychicReader(psychic_key="public_key")
logger.info(reader.load_data(connector_id="connector_id", account_id="account_id"))
|
"""Psychic reader."""
import logging
import os
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)
class PsychicReader(BaseReader):
"""
Psychic reader.
Psychic is a platform that allows syncing data from many SaaS apps through one
universal API.
This reader connects to an instance of Psychic and reads data from it, given a
connector ID, account ID, and API key.
Learn more at docs.psychic.dev.
Args:
psychic_key (str): Secret key for Psychic.
Get one at https://dashboard.psychic.dev/api-keys.
"""
def __init__(self, psychic_key: Optional[str] = None) -> None:
"""Initialize with parameters."""
try:
from psychicapi import ConnectorId, Psychic
except ImportError:
raise ImportError(
"`psychicapi` package not found, please run `pip install psychicapi`"
)
if psychic_key is None:
            psychic_key = os.environ.get("PSYCHIC_SECRET_KEY")
if psychic_key is None:
raise ValueError(
"Must specify `psychic_key` or set environment "
"variable `PSYCHIC_SECRET_KEY`."
)
self.psychic = Psychic(secret_key=psychic_key)
self.ConnectorId = ConnectorId
def load_data(
self, connector_id: Optional[str] = None, account_id: Optional[str] = None
) -> List[Document]:
"""
Load data from a Psychic connection.
Args:
connector_id (str): The connector ID to connect to
account_id (str): The account ID to connect to
Returns:
List[Document]: List of documents.
"""
if not connector_id or not account_id:
raise ValueError("Must specify both `connector_id` and `account_id`.")
if connector_id not in self.ConnectorId.__members__:
raise ValueError("Invalid connector ID.")
# get all the documents in the database
docs = []
data = self.psychic.get_documents(self.ConnectorId[connector_id], account_id)
for resource in data:
text = resource.get("content")
doc_id = resource.get("uri")
docs.append(
Document(
text=text,
id_=doc_id,
metadata={"connector_id": connector_id, "account_id": account_id},
)
)
return docs
if __name__ == "__main__":
reader = PsychicReader(psychic_key="public_key")
logger.info(reader.load_data(connector_id="connector_id", account_id="account_id"))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
import resampy
from .vggish_params import *
from .mel_features import *
try:
import soundfile as sf
def wav_read(wav_file):
wav_data, sr = sf.read(wav_file, dtype='int16')
return wav_data, sr
except ImportError:
def wav_read(wav_file):
raise NotImplementedError('WAV file reading requires soundfile package.')
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is STFT_HOP_LENGTH_SECONDS.
"""
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
if sample_rate != SAMPLE_RATE:
data = resampy.resample(data, sample_rate, SAMPLE_RATE)
# Compute log mel spectrogram features.
log_mel = log_mel_spectrogram(
data,
audio_sample_rate=SAMPLE_RATE,
log_offset=LOG_OFFSET,
window_length_secs=STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=STFT_HOP_LENGTH_SECONDS,
num_mel_bins=NUM_MEL_BINS,
lower_edge_hertz=MEL_MIN_HZ,
upper_edge_hertz=MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples
def wavfile_to_examples(wav_file):
"""Convenience wrapper around waveform_to_examples() for a common WAV format.
Args:
wav_file: String path to a file, or a file-like object. The file
is assumed to contain WAV audio data with signed 16-bit PCM samples.
Returns:
See waveform_to_examples.
"""
wav_data, sr = wav_read(wav_file)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
return waveform_to_examples(samples, sr)
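# Usage sketch (the WAV path below is a placeholder for a 16-bit PCM file):
#
#   examples = wavfile_to_examples('speech.wav')
#   # examples: [num_examples, num_frames, num_bands] batch of log mel
#   # spectrogram patches ready to feed into VGGish.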
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute input examples for VGGish from audio waveform."""
import numpy as np
import resampy
import sys
import os
sys.path.append(os.getcwd())
from vggish.vggish_params import *
from vggish.mel_features import *
try:
import soundfile as sf
def wav_read(wav_file):
wav_data, sr = sf.read(wav_file, dtype='int16')
return wav_data, sr
except ImportError:
def wav_read(wav_file):
raise NotImplementedError('WAV file reading requires soundfile package.')
def waveform_to_examples(data, sample_rate):
"""Converts audio waveform into an array of examples for VGGish.
Args:
data: np.array of either one dimension (mono) or two dimensions
(multi-channel, with the outer dimension representing channels).
Each sample is generally expected to lie in the range [-1.0, +1.0],
although this is not required.
sample_rate: Sample rate of data.
Returns:
3-D np.array of shape [num_examples, num_frames, num_bands] which represents
a sequence of examples, each of which contains a patch of log mel
spectrogram, covering num_frames frames of audio and num_bands mel frequency
bands, where the frame length is STFT_HOP_LENGTH_SECONDS.
"""
# Convert to mono.
if len(data.shape) > 1:
data = np.mean(data, axis=1)
# Resample to the rate assumed by VGGish.
if sample_rate != SAMPLE_RATE:
data = resampy.resample(data, sample_rate, SAMPLE_RATE)
# Compute log mel spectrogram features.
log_mel = log_mel_spectrogram(
data,
audio_sample_rate=SAMPLE_RATE,
log_offset=LOG_OFFSET,
window_length_secs=STFT_WINDOW_LENGTH_SECONDS,
hop_length_secs=STFT_HOP_LENGTH_SECONDS,
num_mel_bins=NUM_MEL_BINS,
lower_edge_hertz=MEL_MIN_HZ,
upper_edge_hertz=MEL_MAX_HZ)
# Frame features into examples.
features_sample_rate = 1.0 / STFT_HOP_LENGTH_SECONDS
example_window_length = int(round(
EXAMPLE_WINDOW_SECONDS * features_sample_rate))
example_hop_length = int(round(
EXAMPLE_HOP_SECONDS * features_sample_rate))
log_mel_examples = frame(
log_mel,
window_length=example_window_length,
hop_length=example_hop_length)
return log_mel_examples
def wavfile_to_examples(wav_file):
"""Convenience wrapper around waveform_to_examples() for a common WAV format.
Args:
wav_file: String path to a file, or a file-like object. The file
is assumed to contain WAV audio data with signed 16-bit PCM samples.
Returns:
See waveform_to_examples.
"""
wav_data, sr = wav_read(wav_file)
assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
samples = wav_data / 32768.0 # Convert to [-1.0, +1.0]
return waveform_to_examples(samples, sr)
|
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
from logging import getLogger
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.vector_stores.utils import metadata_dict_to_node
logger = getLogger(__name__)
def get_user_agent() -> str:
"""Get user agent for Elasticsearch client."""
import llama_index.core
version = getattr(llama_index.core, "__version__", "")
return f"llama_index-py-vs/{version}"
def get_elasticsearch_client(
url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> AsyncElasticsearch:
if url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if url:
connection_params["hosts"] = [url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = Elasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
async_es_client = AsyncElasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
sync_es_client.info() # use sync client so don't have to 'await' to just get info
return async_es_client
def convert_es_hit_to_node(
hit: Dict[str, Any], text_field: str = "content"
) -> BaseNode:
"""
Convert an Elasticsearch search hit to a BaseNode.
Args:
hit: The Elasticsearch search hit
text_field: The field name that contains the text content
Returns:
BaseNode: The converted node
"""
source = hit.get("_source", {})
metadata = source.get("metadata", {})
text = source.get(text_field, None)
node_id = hit.get("_id")
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(f"Could not parse metadata from hit {source.get('metadata')}")
node_info = source.get("node_info")
relationships = source.get("relationships", {})
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
return node
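# Usage sketch (the URL and credentials below are placeholders):
#
#   es_client = get_elasticsearch_client(
#       url="http://localhost:9200", username="elastic", password="changeme"
#   )
#   # returns an AsyncElasticsearch client after a synchronous info() sanity check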
|
from typing import Any, Dict, Optional
from elasticsearch import AsyncElasticsearch, Elasticsearch
def get_user_agent() -> str:
"""Get user agent for Elasticsearch client."""
import llama_index.core
version = getattr(llama_index.core, "__version__", "")
return f"llama_index-py-vs/{version}"
def get_elasticsearch_client(
url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> AsyncElasticsearch:
if url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if url:
connection_params["hosts"] = [url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = Elasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
async_es_client = AsyncElasticsearch(
**connection_params, headers={"user-agent": get_user_agent()}
)
sync_es_client.info() # use sync client so don't have to 'await' to just get info
return async_es_client
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import PointCloud3D
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDocument):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDocument):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from mmdet.registry import MODELS
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@MODELS.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
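    # Reading of the loss above: both logit sets are softened with temperature
    # T before the KL divergence, and the result is multiplied by T * T, the
    # usual correction that keeps gradient magnitudes comparable across
    # temperatures in knowledge distillation.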
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
from langchain_core.runnables.history import (
GetSessionHistoryCallable,
MessagesOrDictWithMessages,
RunnableWithMessageHistory,
)
__all__ = [
"GetSessionHistoryCallable",
"MessagesOrDictWithMessages",
"RunnableWithMessageHistory",
]
|
from langchain_core.runnables.history import (
GetSessionHistoryCallable,
MessagesOrDictWithMessages,
RunnableWithMessageHistory,
)
__all__ = [
"RunnableWithMessageHistory",
"GetSessionHistoryCallable",
"MessagesOrDictWithMessages",
]
|
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import extract_archive
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "781f12f4406ed36ed27ae3bce55da47ba176e2d8bae67319e389e07b2c9bd769"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""Create a dataset for *Device Recorded VCTK (Small subset version)* [:footcite:`Sarfjoo2018DeviceRV`].
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url_to_file(url, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Tensor, int, str, str, str, int):
``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id,\
utterance_id, source, channel_id)``
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
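# Illustrative usage sketch (an addition, not part of the upstream module). It
# assumes the DR-VCTK archive has already been downloaded and extracted under
# "./data" (pass download=True to fetch it on first use). The guard keeps the
# module import side-effect free.
if __name__ == "__main__":
    dataset = DR_VCTK("./data", subset="train", download=False)
    print(f"Number of samples: {len(dataset)}")
    # Each item is the 8-tuple documented in __getitem__ above.
    (
        waveform_clean,
        sample_rate_clean,
        waveform_noisy,
        sample_rate_noisy,
        speaker_id,
        utterance_id,
        source,
        channel_id,
    ) = dataset[0]
    print(waveform_clean.shape, sample_rate_clean, speaker_id, utterance_id, source, channel_id)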
|
from pathlib import Path
from typing import Dict, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
extract_archive,
)
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "781f12f4406ed36ed27ae3bce55da47ba176e2d8bae67319e389e07b2c9bd769"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""Create a dataset for *Device Recorded VCTK (Small subset version)* [:footcite:`Sarfjoo2018DeviceRV`].
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url_to_file(url, archive, hash_prefix=_CHECKSUM)
extract_archive(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Tensor, int, str, str, str, int):
``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id,\
utterance_id, source, channel_id)``
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
|
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
BaseTransformOutputParser,
StrOutputParser,
)
from langchain_core.output_parsers.base import T
# Backwards compatibility.
NoOpOutputParser = StrOutputParser
__all__ = [
"BaseCumulativeTransformOutputParser",
"BaseGenerationOutputParser",
"BaseLLMOutputParser",
"BaseOutputParser",
"BaseTransformOutputParser",
"NoOpOutputParser",
"OutputParserException",
"StrOutputParser",
"T",
]
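# Small illustrative check (an addition): the backwards-compatibility alias above
# means both names refer to the same class, which passes text through unchanged.
if __name__ == "__main__":
    assert NoOpOutputParser is StrOutputParser
    assert StrOutputParser().parse("hello") == "hello"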
|
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import (
BaseCumulativeTransformOutputParser,
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
BaseTransformOutputParser,
StrOutputParser,
)
from langchain_core.output_parsers.base import T
# Backwards compatibility.
NoOpOutputParser = StrOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"NoOpOutputParser",
"StrOutputParser",
"OutputParserException",
"T",
]
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
    # Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
# Save the model
model.push_to_hub(
"sparse-embedding/splade_example",
private=True,
)
# Load the model
loaded_model = SparseEncoder("sparse-embedding/splade_example")
print(f"Loaded model: {loaded_model}")
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = loaded_model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {loaded_model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = loaded_model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
import numpy as np
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.models import MLMTransformer, SpladePooling
def main():
# Initialize the SPLADE model
model_name = "opensearch-project/opensearch-neural-sparse-encoding-doc-v2-distill" # "naver/efficient-splade-V-large-doc" # "prithivida/Splade_PP_en_v1" # "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
    # Print embedding dim and sparsity
print(f"Embedding dim: {model.get_sentence_embedding_dimension()}")
print(f"Embedding sparsity: {model.get_sparsity_stats(embeddings)}")
# Compute similarity matrix
similarity_matrix = model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
# Save the model
model.push_to_hub(
"splade_example",
private=True,
)
# Load the model
loaded_model = SparseEncoder("arthurbresnu/splade_example")
print(f"Loaded model: {loaded_model}")
# Sample texts
texts = [
"The weather is lovely today.",
"It's so sunny outside!",
"He drove to the stadium.",
]
# Generate embeddings
embeddings = loaded_model.encode(texts, convert_to_sparse_tensor=True)
print(type(embeddings))
# Print embedding shape and sparsity
print(f"Embedding shape: {embeddings.shape}")
print(f"Embedding sparsity: {loaded_model.get_sparsity_stats(embeddings)}%")
# Compute similarity matrix
similarity_matrix = loaded_model.similarity(embeddings, embeddings)
# Print similarity matrix
print("\nSimilarity Matrix:")
for i, text in enumerate(texts):
print(f"{i}: {text[:50]}...")
print("\n" + " " * 10 + " ".join([f"{i:5d}" for i in range(len(texts))]))
for i, row in enumerate(similarity_matrix):
print(f"{i:5d} " + " ".join([f"{val:.3f}" for val in row]))
vocab_size = embeddings.shape[1]
print(f"Vocabulary size: {vocab_size}")
# Visualize top tokens for each text
top_k = 20
print(f"\nTop tokens {top_k} for each text:")
for i, text in enumerate(texts):
# Get top k indices in sparse tensor
# Get top k indices in sparse tensor (sorted from highest to lowest)
top_indices = np.argsort(-embeddings[i].to_dense().cpu().numpy())[:top_k]
top_values = embeddings[i].to_dense().cpu().numpy()[top_indices]
top_tokens = [model.tokenizer.decode([idx]) for idx in top_indices]
print(f"{i}: {text}")
print(f"Top tokens: {top_tokens}")
print(f"Top values: {top_values}")
print()
if __name__ == "__main__":
main()
|
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
from docarray.array.doc_vec.doc_vec import DocVec
__all__ = ['DocList', 'DocVec', 'AnyDocArray']
|
from docarray.array.document import DocumentArray
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataSample
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataSample(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataSample(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataSample(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataSample()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array.abstract_array import AnyDocumentArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDocument):
sub_sub_text: TextDoc
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDocument):
sub_text: TextDoc
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(BaseDocument):
mm_text: TextDoc
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=TextDoc(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=TextDoc(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=TextDoc(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDocument()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
from typing import Optional
import pytest
import torch
from docarray import BaseDocument, DocumentArray
from docarray.array.abstract_array import AnyDocumentArray
from docarray.documents import Text
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDocument):
sub_sub_text: Text
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDocument):
sub_text: Text
sub_da: DocumentArray[SubSubDoc]
class MultiModalDoc(BaseDocument):
mm_text: Text
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocumentArray[SubDoc]
docs = DocumentArray[MultiModalDoc](
[
MultiModalDoc(
mm_text=Text(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=Text(text=f'sub_{i}_1'),
sub_da=DocumentArray[SubSubDoc](
[
SubSubDoc(
sub_sub_text=Text(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDocument):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.stack()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDocument()
input_list = [DocumentArray([doc, doc, doc])]
flattened = AnyDocumentArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be imported and it takes too much time.
# Therefore, import it in function scope to save time.
import importlib.util
import pkg_resources
from pkg_resources import get_distribution
# refresh the pkg_resources
    # more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return importlib.util.find_spec(package) is not None
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
>>> get_installed_path('mmcls')
>>> '.../lib/python3.7/site-packages/mmcls'
"""
import importlib.util
from pkg_resources import DistributionNotFound, get_distribution
    # If the package name is not the same as the module name, the module name
    # should be inferred. For example, mmcv-full is the package name, but mmcv
    # is the module name. To get the installed path of mmcv-full, we should
    # concatenate pkg.location and the module name.
try:
pkg = get_distribution(package)
except DistributionNotFound as e:
        # If the package is not installed, a package path set in PYTHONPATH
        # can still be detected by `find_spec`.
spec = importlib.util.find_spec(package)
if spec is not None:
if spec.origin is not None:
return osp.dirname(spec.origin)
# For namespace packages, the origin is None, and the first path
# in submodule_search_locations will be returned.
# namespace packages: https://packaging.python.org/en/latest/guides/packaging-namespace-packages/ # noqa: E501
elif spec.submodule_search_locations is not None:
locations = spec.submodule_search_locations
if isinstance(locations, list):
return locations[0]
else:
                    # `submodule_search_locations` is not subscriptable in
                    # Python 3.7. Therefore we use `_path` to get the first
                    # path.
return locations._path[0] # type: ignore
else:
raise e
else:
raise e
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
from pkg_resources import get_distribution
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'can not infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
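# Illustrative usage sketch (an addition, not part of the upstream module);
# "numpy" is only an example package name.
if __name__ == "__main__":
    if is_installed("numpy"):
        # Resolve the on-disk location of the installed package.
        print(get_installed_path("numpy"))
    else:
        # Falls back to `python -m pip install numpy` via call_command().
        install_package("numpy")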
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
import subprocess
def is_installed(package: str) -> bool:
"""Check package whether installed.
Args:
package (str): Name of package to be checked.
"""
# When executing `import mmengine.runner`,
# pkg_resources will be imported and it takes too much time.
# Therefore, import it in function scope to save time.
import pkg_resources
from pkg_resources import get_distribution
# refresh the pkg_resources
    # more details at https://github.com/pypa/setuptools/issues/373
importlib.reload(pkg_resources)
try:
get_distribution(package)
return True
except pkg_resources.DistributionNotFound:
return False
def get_installed_path(package: str) -> str:
"""Get installed path of package.
Args:
package (str): Name of package.
Example:
>>> get_installed_path('mmcls')
>>> '.../lib/python3.7/site-packages/mmcls'
"""
from pkg_resources import get_distribution
    # If the package name is not the same as the module name, the module name
    # should be inferred. For example, mmcv-full is the package name, but mmcv
    # is the module name. To get the installed path of mmcv-full, we should
    # concatenate pkg.location and the module name.
pkg = get_distribution(package)
possible_path = osp.join(pkg.location, package)
if osp.exists(possible_path):
return possible_path
else:
return osp.join(pkg.location, package2module(package))
def package2module(package: str):
"""Infer module name from package.
Args:
package (str): Package to infer module name.
"""
from pkg_resources import get_distribution
pkg = get_distribution(package)
if pkg.has_metadata('top_level.txt'):
module_name = pkg.get_metadata('top_level.txt').split('\n')[0]
return module_name
else:
raise ValueError(f'can not infer the module name of {package}')
def call_command(cmd: list) -> None:
try:
subprocess.check_call(cmd)
except Exception as e:
raise e # type: ignore
def install_package(package: str):
if not is_installed(package):
call_command(['python', '-m', 'pip', 'install', package])
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[Tuple[
Any, BaseDataSample]]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
            outputs (Sequence[BaseDataSample], optional): Outputs from model.
                In order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_epoch(self, runner: object) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (object): The runner of the training process.
"""
for scheduler in runner.schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g learning rate
and momentum."""
priority = 'LOW'
def after_iter(self,
runner: object,
data_batch: Optional[Sequence[BaseDataSample]] = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (object): The runner of the training process.
data_batch (Sequence[BaseDataSample]): Data from dataloader. In
order to keep this interface consistent with other hooks, we
keep ``data_batch`` here. Defaults to None.
            outputs (Sequence[BaseDataSample]): Outputs from model. In
                order to keep this interface consistent with other hooks, we
                keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_epoch(self, runner: object) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (object): The runner of the training process.
"""
for scheduler in runner.schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the sentences in the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the Quora Duplicate Questions Detection task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity between the sentences in the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_quora_duplicate_questions.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder, CrossEncoderTrainingArguments
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses import BinaryCrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_quora-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 1 label
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, or rerankers like Alibaba-NLP/gte-reranker-modernbert-base
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=1)
# 2. Load the Quora duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
logging.info("Read quora-duplicates train dataset")
dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train")
eval_dataset = dataset.select(range(10_000))
test_dataset = dataset.select(range(10_000, 20_000))
train_dataset = dataset.select(range(20_000, len(dataset)))
logging.info(train_dataset)
logging.info(eval_dataset)
logging.info(test_dataset)
# 3. Define our training loss, we use one that accepts pairs with a binary label
loss = BinaryCrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["sentence1"], eval_dataset["sentence2"])),
labels=eval_dataset["label"],
name="quora-duplicates-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-quora-duplicates"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
    sentence_pairs=list(zip(test_dataset["sentence1"], test_dataset["sentence2"])),
    labels=test_dataset["label"],
name="quora-duplicates-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms_marco_dev_small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms_marco_dev_small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.61
MRR@10: 54.30
NDCG@10: 65.20
Model Query Sparsity: Active Dimensions: 43.9, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 128.4, Sparsity Ratio: 0.9958
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms_marco_dev_small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6520
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseRerankingEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
"""
RerankingEvaluator: Evaluating the model on the ms-marco-dev-small dataset:
Queries: 967 Positives: Min 1.0, Mean 1.1, Max 3.0 Negatives: Min 1.0, Mean 7.1, Max 9.0
MAP: 53.46
MRR@10: 54.18
NDCG@10: 65.10
Model Sparsity Stats Query : Row Non-Zero Mean: 43.89658737182617, Row Sparsity Mean: 0.9985617995262146
Model Sparsity Stats Corpus : Row Non-Zero Mean: 128.37216186523438, Row Sparsity Mean: 0.9957940578460693
"""
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
# => Primary metric: ms-marco-dev-small_ndcg@10
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6510
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .ema_hook import EMAHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook', 'EMAHook'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .checkpoint_hook import CheckpointHook
from .empty_cache_hook import EmptyCacheHook
from .hook import Hook
from .iter_timer_hook import IterTimerHook
from .logger_hook import LoggerHook
from .naive_visualization_hook import NaiveVisualizationHook
from .optimizer_hook import OptimizerHook
from .param_scheduler_hook import ParamSchedulerHook
from .sampler_seed_hook import DistSamplerSeedHook
from .sync_buffer_hook import SyncBuffersHook
__all__ = [
'Hook', 'IterTimerHook', 'DistSamplerSeedHook', 'ParamSchedulerHook',
'OptimizerHook', 'SyncBuffersHook', 'EmptyCacheHook', 'CheckpointHook',
'LoggerHook', 'NaiveVisualizationHook'
]
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply to initialize a fake
    process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
    A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication; it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (it simulates per-rank behavior).
    NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda"])
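# Illustrative usage sketch (an addition): with the "fake" backend registered
# above, a single process can pretend to be one rank of a larger world. The
# world_size of 2 is arbitrary; collectives complete but their results are not
# meaningful.
if __name__ == "__main__":
    import torch
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=FakeStore())
    t = torch.ones(4)
    dist.all_reduce(t)  # no real communication happens
    dist.destroy_process_group()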
|
# mypy: allow-untyped-defs
import torch.distributed as dist
from torch._C._distributed_c10d import FakeProcessGroup
class FakeStore(dist.Store):
"""
    A fake store is a fake key-value store used simply to initialize a fake
    process group; one can use either FakeStore or HashStore.
"""
def _create_fake_pg(prefix_store, rank, world_size, timeout):
"""
    A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication; it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (it simulates per-rank behavior).
    NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when
    experimenting with distributed code without caring about the actual data.
"""
return FakeProcessGroup(rank, world_size)
dist.Backend.register_backend("fake", _create_fake_pg, devices=["cpu", "cuda"])
|
import logging
import os
import threading
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|THREAD-{threading.get_native_id()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(resource_name: str, action_name: str, max_retry: int = 5):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.error(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
# Define the retrying strategy
retrying_func = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=1, min=1, max=30),
before_sleep=on_retry,
reraise=True,
)(func)
try:
result = retrying_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return wrapper
return decorator
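# Illustrative usage sketch (an addition): the names below ("Redis",
# "Acquire connection", connect_to_redis) are hypothetical and only show how the
# decorator is meant to wrap a flaky connection routine.
if __name__ == "__main__":
    @conn_retry("Redis", "Acquire connection")
    def connect_to_redis():
        # A real implementation would build and return a client here.
        return "connected"

    connect_to_redis()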
|
import logging
import os
from functools import wraps
from uuid import uuid4
from tenacity import retry, stop_after_attempt, wait_exponential
from backend.util.process import get_service_name
logger = logging.getLogger(__name__)
def _log_prefix(resource_name: str, conn_id: str):
"""
Returns a prefix string for logging purposes.
This needs to be called on the fly to get the current process ID & service name,
not the parent process ID & service name.
"""
return f"[PID-{os.getpid()}|{get_service_name()}|{resource_name}-{conn_id}]"
def conn_retry(resource_name: str, action_name: str, max_retry: int = 5):
conn_id = str(uuid4())
def on_retry(retry_state):
prefix = _log_prefix(resource_name, conn_id)
exception = retry_state.outcome.exception()
logger.info(f"{prefix} {action_name} failed: {exception}. Retrying now...")
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
prefix = _log_prefix(resource_name, conn_id)
logger.info(f"{prefix} {action_name} started...")
# Define the retrying strategy
retrying_func = retry(
stop=stop_after_attempt(max_retry + 1),
wait=wait_exponential(multiplier=1, min=1, max=30),
before_sleep=on_retry,
reraise=True,
)(func)
try:
result = retrying_func(*args, **kwargs)
logger.info(f"{prefix} {action_name} completed successfully.")
return result
except Exception as e:
logger.error(f"{prefix} {action_name} failed after retries: {e}")
raise
return wrapper
return decorator
|
import json
import numpy as np
import xgboost as xgb
rng = np.random.RandomState(1994)
class TestGPUTrainingContinuation:
def test_training_continuation(self):
kRows = 64
kCols = 32
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
dtrain = xgb.DMatrix(X, y)
params = {
"tree_method": "gpu_hist",
"max_depth": "2",
"gamma": "0.1",
"alpha": "0.01",
}
bst_0 = xgb.train(params, dtrain, num_boost_round=64)
dump_0 = bst_0.get_dump(dump_format="json")
bst_1 = xgb.train(params, dtrain, num_boost_round=32)
bst_1 = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_1)
dump_1 = bst_1.get_dump(dump_format="json")
def recursive_compare(obj_0, obj_1):
if isinstance(obj_0, float):
assert np.isclose(obj_0, obj_1, atol=1e-6)
elif isinstance(obj_0, str):
assert obj_0 == obj_1
elif isinstance(obj_0, int):
assert obj_0 == obj_1
elif isinstance(obj_0, dict):
keys_0 = list(obj_0.keys())
keys_1 = list(obj_1.keys())
values_0 = list(obj_0.values())
values_1 = list(obj_1.values())
for i in range(len(obj_0.items())):
assert keys_0[i] == keys_1[i]
if list(obj_0.keys())[i] != "missing":
recursive_compare(values_0[i], values_1[i])
else:
for i in range(len(obj_0)):
recursive_compare(obj_0[i], obj_1[i])
assert len(dump_0) == len(dump_1)
for i in range(len(dump_0)):
obj_0 = json.loads(dump_0[i])
obj_1 = json.loads(dump_1[i])
recursive_compare(obj_0, obj_1)
|
import json
import numpy as np
import xgboost as xgb
rng = np.random.RandomState(1994)
class TestGPUTrainingContinuation:
def test_training_continuation(self):
kRows = 64
kCols = 32
X = np.random.randn(kRows, kCols)
y = np.random.randn(kRows)
dtrain = xgb.DMatrix(X, y)
params = {'tree_method': 'gpu_hist', 'max_depth': '2',
'gamma': '0.1', 'alpha': '0.01'}
bst_0 = xgb.train(params, dtrain, num_boost_round=64)
dump_0 = bst_0.get_dump(dump_format='json')
bst_1 = xgb.train(params, dtrain, num_boost_round=32)
bst_1 = xgb.train(params, dtrain, num_boost_round=32, xgb_model=bst_1)
dump_1 = bst_1.get_dump(dump_format='json')
def recursive_compare(obj_0, obj_1):
if isinstance(obj_0, float):
assert np.isclose(obj_0, obj_1, atol=1e-6)
elif isinstance(obj_0, str):
assert obj_0 == obj_1
elif isinstance(obj_0, int):
assert obj_0 == obj_1
elif isinstance(obj_0, dict):
keys_0 = list(obj_0.keys())
keys_1 = list(obj_1.keys())
values_0 = list(obj_0.values())
values_1 = list(obj_1.values())
for i in range(len(obj_0.items())):
assert keys_0[i] == keys_1[i]
if list(obj_0.keys())[i] != 'missing':
recursive_compare(values_0[i],
values_1[i])
else:
for i in range(len(obj_0)):
recursive_compare(obj_0[i], obj_1[i])
assert len(dump_0) == len(dump_1)
for i in range(len(dump_0)):
obj_0 = json.loads(dump_0[i])
obj_1 = json.loads(dump_1[i])
recursive_compare(obj_0, obj_1)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickhouseSettings": "langchain_community.vectorstores",
"Clickhouse": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Clickhouse",
"ClickhouseSettings",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import Clickhouse, ClickhouseSettings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickhouseSettings": "langchain_community.vectorstores",
"Clickhouse": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClickhouseSettings",
"Clickhouse",
]
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
'rocksdict<=0.2.16',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite==0.3.13',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
"""Table node mapping."""
import uuid
from typing import Any, Dict, Optional, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
)
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.utilities.sql_wrapper import SQLDatabase
class SQLTableSchema(BaseModel):
"""Lightweight representation of a SQL table."""
table_name: str
context_str: Optional[str] = None
class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]):
"""SQL Table node mapping."""
def __init__(self, sql_database: SQLDatabase) -> None:
self._sql_database = sql_database
@classmethod
def from_objects(
cls,
objs: Sequence[SQLTableSchema],
*args: Any,
sql_database: Optional[SQLDatabase] = None,
**kwargs: Any,
) -> "BaseObjectNodeMapping":
"""Initialize node mapping."""
if sql_database is None:
raise ValueError("Must provide sql_database")
# ignore objs, since we are building from sql_database
return cls(sql_database)
def _add_object(self, obj: SQLTableSchema) -> None:
raise NotImplementedError
def to_node(self, obj: SQLTableSchema) -> TextNode:
"""To node."""
# taken from existing schema logic
table_text = (
f"Schema of table {obj.table_name}:\n"
f"{self._sql_database.get_single_table_info(obj.table_name)}\n"
)
metadata = {"name": obj.table_name}
if obj.context_str is not None:
table_text += f"Context of table {obj.table_name}:\n"
table_text += obj.context_str
metadata["context"] = obj.context_str
table_identity = f"{obj.table_name}{obj.context_str}"
return TextNode(
id_=str(uuid.uuid5(namespace=uuid.NAMESPACE_DNS, name=table_identity)),
text=table_text,
metadata=metadata,
excluded_embed_metadata_keys=["name", "context"],
excluded_llm_metadata_keys=["name", "context"],
)
def _from_node(self, node: BaseNode) -> SQLTableSchema:
"""From node."""
if node.metadata is None:
raise ValueError("Metadata must be set")
return SQLTableSchema(
table_name=node.metadata["name"], context_str=node.metadata.get("context")
)
@property
def obj_node_mapping(self) -> Dict[int, Any]:
"""The mapping data structure between node and object."""
raise NotImplementedError("Subclasses should implement this!")
def persist(
self, persist_dir: str = ..., obj_node_mapping_fname: str = ...
) -> None:
"""Persist objs."""
raise NotImplementedError("Subclasses should implement this!")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> "SQLTableNodeMapping":
raise NotImplementedError(
"This object node mapping does not support persist method."
)
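A note on the id choice above: uuid5 derives the UUID from its inputs, so the same table name and context always yield the same node id across processes and runs, unlike Python's per-process salted hash(). A minimal sketch with hypothetical table values:

import uuid

table_identity = "albums" + "sales figures per album"  # hypothetical name + context
id_a = str(uuid.uuid5(namespace=uuid.NAMESPACE_DNS, name=table_identity))
id_b = str(uuid.uuid5(namespace=uuid.NAMESPACE_DNS, name=table_identity))
assert id_a == id_b  # deterministic, stable across interpreter restarts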
|
"""Table node mapping."""
from typing import Any, Dict, Optional, Sequence
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_DIR,
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
)
from llama_index.core.schema import BaseNode, TextNode
from llama_index.core.utilities.sql_wrapper import SQLDatabase
class SQLTableSchema(BaseModel):
"""Lightweight representation of a SQL table."""
table_name: str
context_str: Optional[str] = None
class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]):
"""SQL Table node mapping."""
def __init__(self, sql_database: SQLDatabase) -> None:
self._sql_database = sql_database
@classmethod
def from_objects(
cls,
objs: Sequence[SQLTableSchema],
*args: Any,
sql_database: Optional[SQLDatabase] = None,
**kwargs: Any,
) -> "BaseObjectNodeMapping":
"""Initialize node mapping."""
if sql_database is None:
raise ValueError("Must provide sql_database")
# ignore objs, since we are building from sql_database
return cls(sql_database)
def _add_object(self, obj: SQLTableSchema) -> None:
raise NotImplementedError
def to_node(self, obj: SQLTableSchema) -> TextNode:
"""To node."""
# taken from existing schema logic
table_text = (
f"Schema of table {obj.table_name}:\n"
f"{self._sql_database.get_single_table_info(obj.table_name)}\n"
)
metadata = {"name": obj.table_name}
if obj.context_str is not None:
table_text += f"Context of table {obj.table_name}:\n"
table_text += obj.context_str
metadata["context"] = obj.context_str
table_identity = f"{obj.table_name}{obj.context_str}"
return TextNode(
id_=str(hash(table_identity)),
text=table_text,
metadata=metadata,
excluded_embed_metadata_keys=["name", "context"],
excluded_llm_metadata_keys=["name", "context"],
)
def _from_node(self, node: BaseNode) -> SQLTableSchema:
"""From node."""
if node.metadata is None:
raise ValueError("Metadata must be set")
return SQLTableSchema(
table_name=node.metadata["name"], context_str=node.metadata.get("context")
)
@property
def obj_node_mapping(self) -> Dict[int, Any]:
"""The mapping data structure between node and object."""
raise NotImplementedError("Subclasses should implement this!")
def persist(
self, persist_dir: str = ..., obj_node_mapping_fname: str = ...
) -> None:
"""Persist objs."""
raise NotImplementedError("Subclasses should implement this!")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> "SQLTableNodeMapping":
raise NotImplementedError(
"This object node mapping does not support persist method."
)
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=[(2048, 800), (2048, 1024)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
|
# dataset settings
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomResize', scale=[(2048, 800), (2048, 1024)]),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='Resize', scale=(2048, 1024), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=8,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_train.json',
data_prefix=dict(img='leftImg8bit/train/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instancesonly_filtered_gtFine_val.json',
data_prefix=dict(img='leftImg8bit/val/'),
test_mode=True,
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instancesonly_filtered_gtFine_val.json',
metric='bbox')
test_evaluator = val_evaluator
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
["Convolve", "FFTConvolve"],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = getattr(T, cls)(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
|
import torch
import torchaudio.prototype.transforms as T
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class Transforms(TestBaseMixin):
@nested_params(
[T.Convolve, T.FFTConvolve],
["full", "valid", "same"],
)
def test_Convolve(self, cls, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
convolve = cls(mode=mode).to(device=self.device, dtype=self.dtype)
output = convolve(x, y)
ts_output = torch_script(convolve)(x, y)
self.assertEqual(ts_output, output)
|
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text)),
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> list[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
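A brief usage sketch of the fakes defined above (no external services needed, only langchain_core): ConsistentFakeEmbeddings encodes each text's first-seen position in the last vector component, and AngularTwoDimensionalEmbeddings maps a numeric string to a point on the unit circle.

emb = ConsistentFakeEmbeddings(dimensionality=4)
first = emb.embed_documents(["foo", "bar"])  # [[1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0]]
again = emb.embed_documents(["bar"])         # [[1.0, 1.0, 1.0, 1.0]] -- same vector as before
assert again[0] == first[1]

angular = AngularTwoDimensionalEmbeddings()
vec = angular.embed_query("0.5")             # angle 0.5 * pi -> approximately [0.0, 1.0]
assert abs(vec[1] - 1.0) < 1e-9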
|
"""Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown."""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> list[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume: just test string, no attention is paid to values.
return [0.0, 0.0]
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import skipIfNoModule, TorchaudioTestCase
from .utils import MockCustomDataset, MockDataloader, MockSentencePieceProcessor
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.mustc.lightning import MuSTCRNNTModule
class MockMUSTC:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
"sup",
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.mustc.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("asr.emformer_rnnt.mustc.lightning.MUSTC", new=MockMUSTC), patch(
"asr.emformer_rnnt.mustc.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield MuSTCRNNTModule(
mustc_path="mustc_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_common_dataloader"),
("test_step", "test_he_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import torch
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
from .utils import MockSentencePieceProcessor, MockCustomDataset, MockDataloader
if is_module_available("pytorch_lightning", "sentencepiece"):
from asr.emformer_rnnt.mustc.lightning import MuSTCRNNTModule
class MockMUSTC:
def __init__(self, *args, **kwargs):
pass
def __getitem__(self, n: int):
return (
torch.rand(1, 32640),
"sup",
)
def __len__(self):
return 10
@contextmanager
def get_lightning_module():
with patch("sentencepiece.SentencePieceProcessor", new=partial(MockSentencePieceProcessor, num_symbols=500)), patch(
"asr.emformer_rnnt.mustc.lightning.GlobalStatsNormalization", new=torch.nn.Identity
), patch("asr.emformer_rnnt.mustc.lightning.MUSTC", new=MockMUSTC), patch(
"asr.emformer_rnnt.mustc.lightning.CustomDataset", new=MockCustomDataset
), patch(
"torch.utils.data.DataLoader", new=MockDataloader
):
yield MuSTCRNNTModule(
mustc_path="mustc_path",
sp_model_path="sp_model_path",
global_stats_path="global_stats_path",
)
@skipIfNoModule("pytorch_lightning")
@skipIfNoModule("sentencepiece")
class TestMuSTCRNNTModule(TorchaudioTestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
torch.random.manual_seed(31)
@parameterized.expand(
[
("training_step", "train_dataloader"),
("validation_step", "val_dataloader"),
("test_step", "test_common_dataloader"),
("test_step", "test_he_dataloader"),
]
)
def test_step(self, step_fname, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
getattr(lightning_module, step_fname)(batch, 0)
@parameterized.expand(
[
("val_dataloader",),
]
)
def test_forward(self, dataloader_fname):
with get_lightning_module() as lightning_module:
dataloader = getattr(lightning_module, dataloader_fname)()
batch = next(iter(dataloader))
lightning_module(batch)
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://router.neutrinoapp.com/api/llm-router"
DEFAULT_ROUTER = "default"
MAX_CONTEXT_WINDOW = 200000
class Neutrino(OpenAILike):
"""
Neutrino LLM.
Examples:
`pip install llama-index-llms-neutrino`
You can create an API key at: <a href="https://platform.neutrinoapp.com/">platform.neutrinoapp.com</a>
```python
import os
os.environ["NEUTRINO_API_KEY"] = "<your-neutrino-api-key>"
```
A router is a collection of LLMs that you can route queries to. You can create a router in the Neutrino <a href="https://platform.neutrinoapp.com/">dashboard</a> or use the default router,
which includes all supported models.
You can treat a router as an LLM.
```python
from llama_index.llms.neutrino import Neutrino
llm = Neutrino(
# api_key="<your-neutrino-api-key>",
# router="<your-router-id>" # (or 'default')
)
response = llm.complete("In short, a Neutrino is")
print(f"Optimal model: {response.raw['model']}")
print(response)
```
"""
model: str = Field(
description="The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."
)
context_window: int = Field(
default=MAX_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: Optional[str] = None,
router: str = DEFAULT_ROUTER,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "NEUTRINO_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "NEUTRINO_API_KEY")
model = model or router
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Neutrino_LLM"
|
from typing import Any, Dict, Optional
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.bridge.pydantic import Field
from llama_index.core.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.core.base.llms.generic_utils import get_from_param_or_env
from llama_index.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://router.neutrinoapp.com/api/llm-router"
DEFAULT_ROUTER = "default"
MAX_CONTEXT_WINDOW = 200000
class Neutrino(OpenAILike):
"""Neutrino LLM.
Examples:
`pip install llama-index-llms-neutrino`
You can create an API key at: <a href="https://platform.neutrinoapp.com/">platform.neutrinoapp.com</a>
```python
import os
os.environ["NEUTRINO_API_KEY"] = "<your-neutrino-api-key>"
```
A router is a collection of LLMs that you can route queries to. You can create a router in the Neutrino <a href="https://platform.neutrinoapp.com/">dashboard</a> or use the default router,
which includes all supported models.
You can treat a router as an LLM.
```python
from llama_index.llms.neutrino import Neutrino
llm = Neutrino(
# api_key="<your-neutrino-api-key>",
# router="<your-router-id>" # (or 'default')
)
response = llm.complete("In short, a Neutrino is")
print(f"Optimal model: {response.raw['model']}")
print(response)
```
"""
model: str = Field(
description="The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."
)
context_window: int = Field(
default=MAX_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.model_fields["is_chat_model"].description,
)
def __init__(
self,
model: Optional[str] = None,
router: str = DEFAULT_ROUTER,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "NEUTRINO_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "NEUTRINO_API_KEY")
model = model or router
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Neutrino_LLM"
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize
from keras.src.dtype_policies import get
from keras.src.dtype_policies import serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert '"syntaxHighlight": false' not in response.text, (
"not used parameters should not be included"
)
assert '"syntaxHighlight": {"theme": "obsidian"}' in response.text, (
"parameters with middle dots should be included in a JSON compatible way"
)
assert '"dom_id": "#swagger-ui"' in response.text, (
"default configs should be preserved"
)
assert "presets: [" in response.text, "default configs should be preserved"
assert "SwaggerUIBundle.presets.apis," in response.text, (
"default configs should be preserved"
)
assert "SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text, (
"default configs should be preserved"
)
assert '"layout": "BaseLayout",' in response.text, (
"default configs should be preserved"
)
assert '"deepLinking": true,' in response.text, (
"default configs should be preserved"
)
assert '"showExtensions": true,' in response.text, (
"default configs should be preserved"
)
assert '"showCommonExtensions": true,' in response.text, (
"default configs should be preserved"
)
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
from fastapi.testclient import TestClient
from docs_src.configure_swagger_ui.tutorial002 import app
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200, response.text
assert (
'"syntaxHighlight": false' not in response.text
), "not used parameters should not be included"
assert (
'"syntaxHighlight": {"theme": "obsidian"}' in response.text
), "parameters with middle dots should be included in a JSON compatible way"
assert (
'"dom_id": "#swagger-ui"' in response.text
), "default configs should be preserved"
assert "presets: [" in response.text, "default configs should be preserved"
assert (
"SwaggerUIBundle.presets.apis," in response.text
), "default configs should be preserved"
assert (
"SwaggerUIBundle.SwaggerUIStandalonePreset" in response.text
), "default configs should be preserved"
assert (
'"layout": "BaseLayout",' in response.text
), "default configs should be preserved"
assert (
'"deepLinking": true,' in response.text
), "default configs should be preserved"
assert (
'"showExtensions": true,' in response.text
), "default configs should be preserved"
assert (
'"showCommonExtensions": true,' in response.text
), "default configs should be preserved"
def test_get_users():
response = client.get("/users/foo")
assert response.status_code == 200, response.text
assert response.json() == {"message": "Hello foo"}
|
import json
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
def test_jira_api_wrapper(self, mock_jira: MagicMock) -> None:
"""Test for Jira API Wrapper using mocks"""
# Configure the mock instance
mock_jira_instance = mock_jira.return_value
# Mock projects method to return mock projects
mock_project1 = MagicMock(key="PROJ1")
mock_project2 = MagicMock(key="PROJ2")
# Set up the mock to return our mock projects
mock_jira_instance.projects.return_value = [mock_project1, mock_project2]
# Initialize wrapper with mocks in place
jira_wrapper = JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=True,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=True,
)
# Test get_projects function
result = jira_wrapper.run("get_projects", "")
# Verify the mock was called and the result contains expected info
mock_jira_instance.projects.assert_called_once()
assert result.startswith("Found 2 projects")
def test_jira_api_wrapper_with_cloud_false(self, mock_jira: MagicMock) -> None:
JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=False,
)
def test_jira_api_wrapper_with_oauth_dict(self, mock_jira: MagicMock) -> None:
oauth_dict = {
"client_id": "test_client_id",
"token": {
"access_token": "test_access_token",
"token_type": "test_token_type",
},
}
oauth_string = json.dumps(oauth_dict)
JiraAPIWrapper(
jira_oauth2=oauth_string,
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
oauth2={"client": None, **oauth_dict},
cloud=False,
)
|
from unittest.mock import MagicMock, patch
import pytest
from langchain_community.utilities.jira import JiraAPIWrapper
@pytest.fixture
def mock_jira(): # type: ignore
with patch("atlassian.Jira") as mock_jira:
yield mock_jira
@pytest.mark.requires("atlassian")
class TestJiraAPIWrapper:
def test_jira_api_wrapper(self, mock_jira: MagicMock) -> None:
"""Test for Jira API Wrapper using mocks"""
# Configure the mock instance
mock_jira_instance = mock_jira.return_value
# Mock projects method to return mock projects
mock_project1 = MagicMock(key="PROJ1")
mock_project2 = MagicMock(key="PROJ2")
# Set up the mock to return our mock projects
mock_jira_instance.projects.return_value = [mock_project1, mock_project2]
# Initialize wrapper with mocks in place
jira_wrapper = JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=True,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=True,
)
# Test get_projects function
result = jira_wrapper.run("get_projects", "")
# Verify the mock was called and the result contains expected info
mock_jira_instance.projects.assert_called_once()
assert result.startswith("Found 2 projects")
def test_jira_api_wrapper_with_cloud_false(self, mock_jira: MagicMock) -> None:
JiraAPIWrapper(
jira_username="test_user",
jira_api_token="test_token",
jira_instance_url="https://test.atlassian.net",
jira_cloud=False,
)
mock_jira.assert_called_once_with(
url="https://test.atlassian.net",
username="test_user",
password="test_token",
cloud=False,
)
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseCumulativeTransformOutputParser",
"BaseGenerationOutputParser",
"BaseLLMOutputParser",
"BaseOutputParser",
"BaseTransformOutputParser",
"CommaSeparatedListOutputParser",
"JsonOutputKeyToolsParser",
"JsonOutputParser",
"JsonOutputToolsParser",
"ListOutputParser",
"MarkdownListOutputParser",
"NumberedListOutputParser",
"PydanticOutputParser",
"PydanticToolsParser",
"SimpleJsonOutputParser",
"StrOutputParser",
"XMLOutputParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return __all__
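For context on the lazy-import pattern above: nothing heavy is imported at module load time; the first attribute access falls through to __getattr__, which imports the mapped submodule via import_attr and caches the resolved object in the module globals. A minimal sketch, assuming langchain_core is installed:

from langchain_core import output_parsers

parser = output_parsers.StrOutputParser()          # first access triggers the import of .string
assert "StrOutputParser" in vars(output_parsers)   # now cached, later lookups skip __getattr__
assert parser.parse("hello") == "hello"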
|
"""**OutputParser** classes parse the output of an LLM call.
**Class hierarchy:**
.. code-block::
BaseLLMOutputParser --> BaseOutputParser --> <name>OutputParser # ListOutputParser, PydanticOutputParser
**Main helpers:**
.. code-block::
Serializable, Generation, PromptValue
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.output_parsers.base import (
BaseGenerationOutputParser,
BaseLLMOutputParser,
BaseOutputParser,
)
from langchain_core.output_parsers.json import (
JsonOutputParser,
SimpleJsonOutputParser,
)
from langchain_core.output_parsers.list import (
CommaSeparatedListOutputParser,
ListOutputParser,
MarkdownListOutputParser,
NumberedListOutputParser,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.output_parsers.pydantic import PydanticOutputParser
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.output_parsers.transform import (
BaseCumulativeTransformOutputParser,
BaseTransformOutputParser,
)
from langchain_core.output_parsers.xml import XMLOutputParser
__all__ = [
"BaseLLMOutputParser",
"BaseGenerationOutputParser",
"BaseOutputParser",
"ListOutputParser",
"CommaSeparatedListOutputParser",
"NumberedListOutputParser",
"MarkdownListOutputParser",
"StrOutputParser",
"BaseTransformOutputParser",
"BaseCumulativeTransformOutputParser",
"SimpleJsonOutputParser",
"XMLOutputParser",
"JsonOutputParser",
"PydanticOutputParser",
"JsonOutputToolsParser",
"JsonOutputKeyToolsParser",
"PydanticToolsParser",
]
_dynamic_imports = {
"BaseLLMOutputParser": "base",
"BaseGenerationOutputParser": "base",
"BaseOutputParser": "base",
"JsonOutputParser": "json",
"SimpleJsonOutputParser": "json",
"ListOutputParser": "list",
"CommaSeparatedListOutputParser": "list",
"MarkdownListOutputParser": "list",
"NumberedListOutputParser": "list",
"JsonOutputKeyToolsParser": "openai_tools",
"JsonOutputToolsParser": "openai_tools",
"PydanticToolsParser": "openai_tools",
"PydanticOutputParser": "pydantic",
"StrOutputParser": "string",
"BaseTransformOutputParser": "transform",
"BaseCumulativeTransformOutputParser": "transform",
"XMLOutputParser": "xml",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return __all__
|
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a JSON, a binary file or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. Note that the CSV format has very limited
compatibility; a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to save data into a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
"""
if file_format == 'json':
self.save_json(file, encoding=encoding)
elif file_format == 'binary':
self.save_binary(file)
elif file_format == 'csv':
self.save_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
@classmethod
def load(
cls: Type['T'],
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON or a binary file, or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. The CSV format has very limited compatibility;
a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to load data from a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
:return: the loaded DocumentArray object
"""
if file_format == 'json':
return cls.load_json(file, encoding=encoding, **kwargs)
elif file_format == 'binary':
return cls.load_binary(file)
elif file_format == 'csv':
return cls.load_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
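A minimal round-trip sketch for the mixin above, assuming the pre-2.0 docarray DocumentArray (which includes CommonIOMixin); the file name is hypothetical:

from docarray import Document, DocumentArray

da = DocumentArray([Document(text='hello'), Document(text='world')])
da.save('docs.json', file_format='json')                        # human-readable JSON
restored = DocumentArray.load('docs.json', file_format='json')
assert len(restored) == 2 and restored[0].text == 'hello'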
|
from typing import Union, TextIO, BinaryIO, TYPE_CHECKING, Type
if TYPE_CHECKING:
from docarray.typing import T
class CommonIOMixin:
"""The common IO helper function for arrays."""
def save(
self,
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a JSON, a binary file or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. Note that the CSV format has very limited
compatibility; a complex DocumentArray with nested structure cannot be restored from a CSV file.
:param encoding: encoding used to save data into a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
"""
if file_format == 'json':
self.save_json(file, encoding=encoding)
elif file_format == 'binary':
self.save_binary(file)
elif file_format == 'csv':
self.save_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
@classmethod
def load(
cls: Type['T'],
file: Union[str, TextIO, BinaryIO],
file_format: str = 'binary',
encoding: str = 'utf-8',
**kwargs
) -> 'T':
"""Load array elements from a JSON or a binary file, or a CSV file.
:param file: File or filename to which the data is saved.
:param file_format: `json` or `binary` or `csv`. JSON and CSV files are human-readable,
but binary format gives much smaller size and faster save/load speed. CSV file has very limited compatability,
complex DocumentArray with nested structure can not be restored from a CSV file.
:param encoding: encoding used to load data from a file (it only applies to `JSON` and `CSV` format).
By default, ``utf-8`` is used.
:return: the loaded DocumentArray object
"""
if file_format == 'json':
return cls.load_json(file, encoding=encoding, **kwargs)
elif file_format == 'binary':
return cls.load_binary(file)
elif file_format == 'csv':
return cls.load_csv(file, encoding=encoding)
else:
raise ValueError('`format` must be one of [`json`, `binary`, `csv`]')
|
from typing import Any, Type, TypeVar, Union
from docarray.base_doc import BaseDoc
from docarray.typing.tensor.tensor import AnyTensor
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDoc):
"""
Document for handling 3D mesh tensor data.
A VerticesAndFaces Document can contain an AnyTensor containing the vertices
information (`VerticesAndFaces.vertices`), and an AnyTensor containing the faces
information (`VerticesAndFaces.faces`).
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
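A small usage sketch for the document above, assuming trimesh and numpy are available; the unit box is hypothetical example data:

import numpy as np
import trimesh

box = trimesh.creation.box()  # unit cube: 8 vertices, 12 triangular faces
doc = VerticesAndFaces(vertices=np.asarray(box.vertices), faces=np.asarray(box.faces))
doc.display()                 # renders the mesh inline when run in a notebook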
|
from typing import Any, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing.tensor.tensor import AnyTensor
T = TypeVar('T', bound='VerticesAndFaces')
class VerticesAndFaces(BaseDocument):
"""
Document for handling 3D mesh tensor data.
A VerticesAndFaces Document can contain an AnyTensor containing the vertices
information (`VerticesAndFaces.vertices`), and an AnyTensor containing the faces
information (`VerticesAndFaces.faces`).
"""
vertices: AnyTensor
faces: AnyTensor
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
return super().validate(value)
def display(self) -> None:
"""
Plot mesh consisting of vertices and faces.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
"""
import trimesh
from IPython.display import display
if self.vertices is None or self.faces is None:
raise ValueError(
'Can\'t display mesh from tensors when the vertices and/or faces '
'are None.'
)
mesh = trimesh.Trimesh(vertices=self.vertices, faces=self.faces)
display(mesh.show())
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful e.g. connecting RNNs and convnets.
Args:
dims: Tuple of integers. Permutation pattern does not include the
batch dimension. Indexing starts at 1.
For instance, `(1, 3, 2)` permutes the second and third dimensions
of the input.
Input shape:
Arbitrary.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Permute((2, 1))(x)
>>> y.shape
(None, 64, 10)
"""
def __init__(self, dims, **kwargs):
super().__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
"Invalid permutation argument `dims` for Permute Layer. "
"The set of indices in `dims` must be consecutive and start "
f"from 1. Received dims={dims}"
)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
for dim in self.dims:
output_shape.append(input_shape[dim])
return tuple(output_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.transpose(inputs, axes=(0,) + self.dims)
def get_config(self):
config = {"dims": self.dims}
base_config = super().get_config()
return {**base_config, **config}
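Beyond the symbolic example in the docstring, a concrete sketch of the layer above on eager data, assuming a Keras 3 backend (TensorFlow, JAX, or PyTorch) is installed:

import numpy as np

layer = Permute((2, 1))
x = np.zeros((4, 10, 64), dtype="float32")  # (batch, timesteps, features)
y = layer(x)
assert tuple(y.shape) == (4, 64, 10)        # batch axis stays first, the last two axes swap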
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Permute")
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful e.g. connecting RNNs and convnets.
Args:
dims: Tuple of integers. Permutation pattern does not include the
batch dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimensions
of the input.
Input shape:
Arbitrary.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
>>> x = keras.Input(shape=(10, 64))
>>> y = keras.layers.Permute((2, 1))(x)
>>> y.shape
(None, 64, 10)
"""
def __init__(self, dims, **kwargs):
super().__init__(**kwargs)
self.dims = tuple(dims)
if sorted(dims) != list(range(1, len(dims) + 1)):
raise ValueError(
"Invalid permutation argument `dims` for Permute Layer. "
"The set of indices in `dims` must be consecutive and start "
f"from 1. Received dims={dims}"
)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def compute_output_shape(self, input_shape):
output_shape = [input_shape[0]]
for dim in self.dims:
output_shape.append(input_shape[dim])
return tuple(output_shape)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
return KerasTensor(
shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse
)
def call(self, inputs):
return ops.transpose(inputs, axes=(0,) + self.dims)
def get_config(self):
config = {"dims": self.dims}
base_config = super().get_config()
return {**base_config, **config}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .logger import get_caller_name, get_root_logger, log_img_scale
from .misc import find_latest_checkpoint, update_data_root
from .setup_env import setup_multi_processes
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale'
]
|
from typing import Any
def __getattr__(name: str = "") -> Any:
msg = (
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
raise AttributeError(msg)
|
from typing import Any
def __getattr__(name: str = "") -> Any:
raise AttributeError(
"This tool has been moved to langchain experiment. "
"This tool has access to a python REPL. "
"For best practices make sure to sandbox this tool. "
"Read https://github.com/langchain-ai/langchain/blob/master/SECURITY.md "
"To keep using this code as is, install langchain experimental and "
"update relevant imports replacing 'langchain' with 'langchain_experimental'"
)
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
super().__init__("", id=id, **kwargs)
RemoveMessage.model_rebuild()
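A brief usage sketch, assuming langchain_core is installed; the message id is a hypothetical placeholder:

msg = RemoveMessage(id="run-abc123")          # refers to the message that should be deleted
assert msg.content == "" and msg.type == "remove"

try:
    RemoveMessage(id="run-abc123", content="not allowed")
except ValueError as err:
    print(err)                                # RemoveMessage does not support 'content' field.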
|
"""Message responsible for deleting other messages."""
from typing import Any, Literal
from langchain_core.messages.base import BaseMessage
class RemoveMessage(BaseMessage):
"""Message responsible for deleting other messages."""
type: Literal["remove"] = "remove"
"""The type of the message (used for serialization). Defaults to "remove"."""
def __init__(self, id: str, **kwargs: Any) -> None:
"""Create a RemoveMessage.
Args:
id: The ID of the message to remove.
kwargs: Additional fields to pass to the message.
Raises:
ValueError: If the 'content' field is passed in kwargs.
"""
if kwargs.pop("content", None):
msg = "RemoveMessage does not support 'content' field."
raise ValueError(msg)
return super().__init__("", id=id, **kwargs)
RemoveMessage.model_rebuild()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, oid_challenge_classes,
oid_v6_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import INSTANCE_OFFSET
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes',
'oid_challenge_classes', 'INSTANCE_OFFSET'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
get_classes, imagenet_det_classes,
imagenet_vid_classes, voc_classes)
from .eval_hooks import DistEvalHook, EvalHook
from .mean_ap import average_precision, eval_map, print_map_summary
from .panoptic_utils import INSTANCE_OFFSET
from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
print_recall_summary)
__all__ = [
'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
'print_map_summary', 'eval_recalls', 'print_recall_summary',
'plot_num_recall', 'plot_iou_recall', 'INSTANCE_OFFSET'
]
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture(scope="session")
def splade_bert_tiny_model_reused() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from __future__ import annotations
import pytest
from sentence_transformers import SparseEncoder
@pytest.fixture()
def splade_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sparse-encoder-testing/splade-bert-tiny-nq")
@pytest.fixture()
def csr_bert_tiny_model() -> SparseEncoder:
return SparseEncoder("sentence-transformers-testing/stsb-bert-tiny-safetensors")
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['de', 'en', 'fr', 'fr'],
... 'translation': ['die katze', 'the cat', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
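# A minimal usage sketch of the feature above; it only exercises encode_example,
# so no pyarrow table or dataset builder is needed.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
    encoded = feature.encode_example(
        {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
    )
    # encode_example sorts the (language, translation) tuples by language code.
    print(encoded["language"])     # ('de', 'en', 'fr', 'fr')
    print(encoded["translation"])  # ('die katze', 'the cat', 'la chatte', 'le chat')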
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
Here for compatibility with tfds.
Input: The Translate feature accepts a dictionary for each example mapping
string language codes to string translations.
Output: A dictionary mapping string language codes to translations as `Text`
features.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
Here for compatibility with tfds.
Input: The TranslationVariableLanguages feature accepts a dictionary for each
example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Output:
language: variable-length 1D tf.Tensor of tf.string language codes, sorted
in ascending order.
translation: variable-length 1D tf.Tensor of tf.string plain text
translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['de', 'en', 'fr', 'fr'],
... 'translation': ['die katze', 'the cat', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except AttributeError:
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
def pytest_configure(config):
config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_metrics_cache = test_hf_cache_home / "metrics"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, NodeExecutionEntry, float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from functools import wraps
from typing import Any, Callable, Concatenate, Coroutine, ParamSpec, TypeVar, cast
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import get_graph, get_node
from backend.data.user import (
get_user_integrations,
get_user_metadata,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, register_pydantic_serializers
from backend.util.settings import Config
P = ParamSpec("P")
R = TypeVar("R")
config = Config()
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
@staticmethod
def exposed_run_and_wait(
f: Callable[P, Coroutine[None, None, R]]
) -> Callable[Concatenate[object, P], R]:
@expose
@wraps(f)
def wrapper(self, *args: P.args, **kwargs: P.kwargs) -> R:
coroutine = f(*args, **kwargs)
res = self.run_and_wait(coroutine)
return res
# Register serializers for annotations on bare function
register_pydantic_serializers(f)
return wrapper
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
# Credits
user_credit_model = get_user_credit_model()
spend_credits = cast(
Callable[[Any, str, str, dict[str, str], float, float], int],
exposed_run_and_wait(user_credit_model.spend_credits),
)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.6.0"
@keras_export("keras.version")
def version():
return __version__
|
from keras.src.api_export import keras_export
# Unique source of truth for the version number.
__version__ = "3.5.0"
@keras_export("keras.version")
def version():
return __version__
|
import importlib
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_py310, needs_pydanticv2
@pytest.fixture(
name="client",
params=[
"tutorial001",
pytest.param("tutorial001_py310", marks=needs_py310),
],
)
def get_client(request: pytest.FixtureRequest):
mod = importlib.import_module(f"docs_src.schema_extra_example.{request.param}")
client = TestClient(mod.app)
return client
@needs_pydanticv2
def test_post_body_example(client: TestClient):
response = client.put(
"/items/5",
json={
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
)
assert response.status_code == 200
@needs_pydanticv2
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
# insert_assert(response.json())
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"name": "item_id",
"in": "path",
"required": True,
"schema": {"type": "integer", "title": "Item Id"},
}
],
"requestBody": {
"required": True,
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Description",
},
"price": {"type": "number", "title": "Price"},
"tax": {
"anyOf": [{"type": "number"}, {"type": "null"}],
"title": "Tax",
},
},
"type": "object",
"required": ["name", "price"],
"title": "Item",
"examples": [
{
"description": "A very nice Item",
"name": "Foo",
"price": 35.4,
"tax": 3.2,
}
],
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
|
import pytest
from fastapi.testclient import TestClient
from ...utils import needs_pydanticv2
@pytest.fixture(name="client")
def get_client():
from docs_src.schema_extra_example.tutorial001 import app
client = TestClient(app)
return client
@needs_pydanticv2
def test_post_body_example(client: TestClient):
response = client.put(
"/items/5",
json={
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
)
assert response.status_code == 200
@needs_pydanticv2
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
# insert_assert(response.json())
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"name": "item_id",
"in": "path",
"required": True,
"schema": {"type": "integer", "title": "Item Id"},
}
],
"requestBody": {
"required": True,
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Description",
},
"price": {"type": "number", "title": "Price"},
"tax": {
"anyOf": [{"type": "number"}, {"type": "null"}],
"title": "Tax",
},
},
"type": "object",
"required": ["name", "price"],
"title": "Item",
"examples": [
{
"description": "A very nice Item",
"name": "Foo",
"price": 35.4,
"tax": 3.2,
}
],
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
|
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
(DocumentArrayMilvus, MilvusConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES:
yaml = _get_yaml(template, params)
if params.get('device_plugins'):
yaml = _get_deployment_with_device_plugins(yaml, params)
if params.get('env_from_secret'):
yaml = _get_deployment_with_env_secret(yaml, params)
if params.get('image_pull_secrets'):
yaml = _get_deployment_with_image_pull_secrets(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict:
for k, v in params['env_from_secret'].items():
env_var = {
'name': k,
'valueFrom': {'secretKeyRef': {'name': v['name'], 'key': v['key']}},
}
deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var)
return deployment
def _get_deployment_with_image_pull_secrets(deployment: Dict, params: Dict) -> Dict:
image_pull_secrets = params['image_pull_secrets']
image_pull_secrets_dict = [{'name': secret} for secret in image_pull_secrets]
deployment['spec']['template']['spec']['imagePullSecrets'] = image_pull_secrets_dict
return deployment
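# A hedged usage sketch: fills one of the deployment templates listed in DEPLOYMENT_FILES.
# The placeholder keys below ('name', 'namespace') are assumptions; the real set of
# placeholders depends on the template files shipped under DEFAULT_RESOURCE_DIR.
if __name__ == "__main__":
    params = {
        'name': 'executor-0',
        'namespace': 'demo',
        'image_pull_secrets': ['my-registry-secret'],  # exercises the imagePullSecrets branch
    }
    deployment = get_yaml('deployment-executor', params)
    print(deployment['spec']['template']['spec']['imagePullSecrets'])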
|
import os
from typing import Dict
DEPLOYMENT_FILES = [
'statefulset-executor',
'deployment-executor',
'deployment-gateway',
'deployment-uses-before',
'deployment-uses-after',
'deployment-uses-before-after',
]
cur_dir = os.path.dirname(__file__)
DEFAULT_RESOURCE_DIR = os.path.join(
cur_dir, '..', '..', '..', '..', 'resources', 'k8s', 'template'
)
def get_yaml(template: str, params: Dict) -> Dict:
"""Create a resource on Kubernetes based on the `template`. It fills the `template` using the `params`.
:param template: path to the template file.
:param params: dictionary for replacing the placeholders (keys) with the actual values.
:return: The yaml dictionary with the corresponding template filled with parameters
"""
if template == 'configmap':
yaml = _get_configmap_yaml(template, params)
elif template in DEPLOYMENT_FILES:
yaml = _get_yaml(template, params)
if params.get('device_plugins'):
yaml = _get_deployment_with_device_plugins(yaml, params)
if params.get('env_from_secret'):
yaml = _get_deployment_with_env_secret(yaml, params)
if params.get('image_pull_secrets'):
yaml = _get_deployment_with_image_pull_secrets(yaml, params)
else:
yaml = _get_yaml(template, params)
return yaml
def _get_yaml(template: str, params: Dict) -> Dict:
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
content = f.read()
for k, v in params.items():
content = content.replace(f'{{{k}}}', str(v))
d = yaml.safe_load(content)
return d
def _get_configmap_yaml(template: str, params: Dict):
import yaml
path = os.path.join(DEFAULT_RESOURCE_DIR, f'{template}.yml')
with open(path, encoding='utf-8') as f:
config_map = yaml.safe_load(f)
config_map['metadata']['name'] = params.get('name') + '-' + 'configmap'
config_map['metadata']['namespace'] = params.get('namespace')
if params.get('data'):
for key, value in params['data'].items():
config_map['data'][key] = str(value)
return config_map
def _get_device_plugins(params: Dict):
data = {'limits': {}}
for key, value in params.items():
data['limits'][key] = value
return data
def _get_deployment_with_device_plugins(deployment: Dict, params: Dict) -> Dict:
device_plugins = _get_device_plugins(params['device_plugins'])
deployment['spec']['template']['spec']['containers'][0][
'resources'
] = device_plugins
return deployment
def _get_deployment_with_env_secret(deployment: Dict, params: Dict) -> Dict:
for k, v in params['env_from_secret'].items():
env_var = {'name': k, 'valueFrom': {'secretKeyRef': {'name': v['name'], 'key': v['key']}}}
deployment['spec']['template']['spec']['containers'][0]['env'].append(env_var)
return deployment
def _get_deployment_with_image_pull_secrets(deployment: Dict, params: Dict) -> Dict:
image_pull_secrets = params['image_pull_secrets']
image_pull_secrets_dict = [{'name': secret} for secret in image_pull_secrets]
deployment['spec']['template']['spec'][
'imagePullSecrets'
] = image_pull_secrets_dict
return deployment
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
It is called when a tensor is assigned to a field of this type,
i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
Raises `ValueError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__validate_getitem__(item)
return cls._create_parametrized_type(target_shape)
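# A minimal sketch of the shape normalization used by __class_getitem__ above:
# __validate_getitem__ turns whatever is written inside Tensor[...] into a tuple of
# ints before it reaches __validate_shape__, so it can be exercised without a backend.
if __name__ == "__main__":
    print(AbstractTensor.__validate_getitem__(128))  # (128,)
    print(AbstractTensor.__validate_getitem__((3, 224, 224)))  # (3, 224, 224)
    try:
        AbstractTensor.__validate_getitem__(object())
    except TypeError as exc:
        print(exc)  # ... is not a valid tensor shape.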
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def _create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item):
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return cls._create_parametrized_type(item)
|
"""Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model.
FunctionMessage is an older version of the ToolMessage schema and does not
contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization). Defaults to "function"."""
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "FunctionMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
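# A minimal sketch of the chunk concatenation implemented in __add__ above: chunks with
# the same function name merge their content, while differing names raise a ValueError.
# The names and contents below are made-up placeholders.
if __name__ == "__main__":
    left = FunctionMessageChunk(name="search", content='{"res', id="chunk-1")
    right = FunctionMessageChunk(name="search", content='ult": 42}', id="chunk-1")
    merged = left + right
    print(merged.content)  # {"result": 42}
    try:
        left + FunctionMessageChunk(name="other", content="x", id="chunk-2")
    except ValueError as exc:
        print(exc)  # Cannot concatenate FunctionMessageChunks with different names.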
|
"""Function Message."""
from typing import Any, Literal
from typing_extensions import override
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class FunctionMessage(BaseMessage):
"""Message for passing the result of executing a tool back to a model.
FunctionMessage is an older version of the ToolMessage schema and does not
contain the tool_call_id field.
The tool_call_id field is used to associate the tool call request with the
tool call response. This is useful in situations where a chat model is able
to request multiple tool calls in parallel.
"""
name: str
"""The name of the function that was executed."""
type: Literal["function"] = "function"
"""The type of the message (used for serialization). Defaults to "function"."""
FunctionMessage.model_rebuild()
class FunctionMessageChunk(FunctionMessage, BaseMessageChunk):
"""Function Message chunk."""
# Ignoring mypy re-assignment here since we're overriding the value
# to make sure that the chunk variant can be discriminated from the
# non-chunk variant.
type: Literal["FunctionMessageChunk"] = "FunctionMessageChunk" # type: ignore[assignment]
"""The type of the message (used for serialization).
Defaults to "FunctionMessageChunk"."""
@override
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
if isinstance(other, FunctionMessageChunk):
if self.name != other.name:
msg = "Cannot concatenate FunctionMessageChunks with different names."
raise ValueError(msg)
return self.__class__(
name=self.name,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
id=self.id,
)
return super().__add__(other)
|
from typing import List
import numpy as np
from torch.utils.data import Dataset
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from sentence_transformers.readers.InputExample import InputExample
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
Args:
sentences: A list of sentences
noise_fn: A noise function: Given a string, it returns a string
with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
from nltk import TreebankWordDetokenizer, word_tokenize
words = word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
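# A hedged usage sketch: builds the dataset on two toy sentences and shows the
# (noisy, original) pairing produced by __getitem__. Assumes nltk and its tokenizer
# data (e.g. 'punkt') are installed, as required by the delete() noise function.
if __name__ == "__main__":
    sentences = ["The quick brown fox jumps over the lazy dog.", "Denoising keeps some words."]
    dataset = DenoisingAutoEncoderDataset(sentences)
    example = dataset[0]
    print(example.texts[1])  # original sentence
    print(example.texts[0])  # noisy copy with roughly 60% of the words deleted
    print(len(dataset))  # 2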
|
from torch.utils.data import Dataset
from typing import List
from ..readers.InputExample import InputExample
import numpy as np
import nltk
from nltk.tokenize.treebank import TreebankWordDetokenizer
class DenoisingAutoEncoderDataset(Dataset):
"""
The DenoisingAutoEncoderDataset returns InputExamples in the format: texts=[noise_fn(sentence), sentence]
It is used in combination with the DenoisingAutoEncoderLoss: Here, a decoder tries to re-construct the
sentence without noise.
:param sentences: A list of sentences
:param noise_fn: A noise function: Given a string, it returns a string with noise, e.g. deleted words
"""
def __init__(self, sentences: List[str], noise_fn=lambda s: DenoisingAutoEncoderDataset.delete(s)):
self.sentences = sentences
self.noise_fn = noise_fn
def __getitem__(self, item):
sent = self.sentences[item]
return InputExample(texts=[self.noise_fn(sent), sent])
def __len__(self):
return len(self.sentences)
# Deletion noise.
@staticmethod
def delete(text, del_ratio=0.6):
words = nltk.word_tokenize(text)
n = len(words)
if n == 0:
return text
keep_or_not = np.random.rand(n) > del_ratio
if sum(keep_or_not) == 0:
keep_or_not[np.random.choice(n)] = True # guarantee that at least one word remains
words_processed = TreebankWordDetokenizer().detokenize(np.array(words)[keep_or_not])
return words_processed
|
from pathlib import Path
from typing import Callable
import numpy as np
import pytest
import torchaudio
from jina import Document, DocumentArray
from vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory.mktemp('data')
return VADSpeechSegmenter(
normalize=False, dump=True, metas={'workspace': str(workspace)}
)
@pytest.fixture
def build_da() -> Callable[[str], 'Document']:
def _build_da(_type):
assert _type in {'wav', 'mp3', 'blob'}
doc = Document(id=_type)
extension = _type if _type != 'blob' else 'wav'
path = str(Path(__file__).parent / f'data/audio/test.{extension}')
if _type == 'blob':
data, sample_rate = torchaudio.load(path)
data = np.mean(data.cpu().numpy(), axis=0)
doc.blob = data
doc.tags['sample_rate'] = sample_rate
else:
doc.uri = path
return DocumentArray(doc)
return _build_da
|
from pathlib import Path
from typing import Callable
import numpy as np
import pytest
import torchaudio
from jina import Document, DocumentArray
from ..vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory.mktemp('data')
return VADSpeechSegmenter(
normalize=False, dump=True, metas={'workspace': str(workspace)}
)
@pytest.fixture
def build_da() -> Callable[[str], 'Document']:
def _build_da(_type):
assert _type in {'wav', 'mp3', 'blob'}
doc = Document(id=_type)
extension = _type if _type != 'blob' else 'wav'
path = str(Path(__file__).parent / f'data/audio/test.{extension}')
if _type == 'blob':
data, sample_rate = torchaudio.load(path)
data = np.mean(data.cpu().numpy(), axis=0)
doc.blob = data
doc.tags['sample_rate'] = sample_rate
else:
doc.uri = path
return DocumentArray(doc)
return _build_da
|
"""Standard LangChain interface tests for Responses API"""
from pathlib import Path
from typing import cast
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
REPO_ROOT_DIR = Path(__file__).parents[6]
class TestOpenAIResponses(TestOpenAIStandard):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "use_responses_api": True}
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4.1-mini", output_version="responses/v1")
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(
model="o4-mini",
reasoning={"effort": "medium", "summary": "auto"},
output_version="responses/v1",
)
input_ = "What was the 3rd highest building in 2000?"
return _invoke(llm, input_, stream)
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
else:
return cast(AIMessage, llm.invoke(input_))
|
"""Standard LangChain interface tests for Responses API"""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_openai import ChatOpenAI
from tests.integration_tests.chat_models.test_base_standard import TestOpenAIStandard
class TestOpenAIResponses(TestOpenAIStandard):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini", "use_responses_api": True}
@pytest.mark.xfail(reason="Unsupported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
Raised when trying to use the dataset viewer HTTP API, for example to access:
- a missing dataset,
- a private/gated dataset without being authenticated, or
- unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == commit_hash or commit_hash is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (commit_hash='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, commit_hash: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, which can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == commit_hash or commit_hash is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (commit_hash='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
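# A hedged usage sketch: queries the dataset viewer API for a public dataset.
# Requires network access; "squad" is an arbitrary public dataset used for illustration,
# and commit_hash=None accepts whichever revision the export was built from.
if __name__ == "__main__":
    parquet_files = get_exported_parquet_files("squad", commit_hash=None, token=None)
    print(len(parquet_files), "exported parquet file(s)")
    dataset_infos = get_exported_dataset_infos("squad", commit_hash=None, token=None)
    print(list(dataset_infos))  # config names, e.g. ['plain_text']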
|
from typing import Any, Dict, List, Optional, Union
from huggingface_hub.utils import get_session
from .. import config
from ..exceptions import DatasetsError
from .file_utils import (
get_authentication_headers_for_url,
)
from .logging import get_logger
logger = get_logger(__name__)
class DatasetViewerError(DatasetsError):
"""Dataset viewer error.
Raised when trying to use the dataset viewer HTTP API, for example to access:
- a missing dataset,
- a private/gated dataset without being authenticated, or
- unavailable /parquet or /info responses.
"""
def get_exported_parquet_files(dataset: str, revision: str, token: Optional[Union[str, bool]]) -> List[Dict[str, Any]]:
"""
Get the dataset exported parquet files
Docs: https://huggingface.co/docs/datasets-server/parquet
"""
dataset_viewer_parquet_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/parquet?dataset="
try:
parquet_data_files_response = get_session().get(
url=dataset_viewer_parquet_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
parquet_data_files_response.raise_for_status()
if "X-Revision" in parquet_data_files_response.headers:
if parquet_data_files_response.headers["X-Revision"] == revision or revision is None:
parquet_data_files_response_json = parquet_data_files_response.json()
if (
parquet_data_files_response_json.get("partial") is False
and not parquet_data_files_response_json.get("pending", True)
and not parquet_data_files_response_json.get("failed", True)
and "parquet_files" in parquet_data_files_response_json
):
return parquet_data_files_response_json["parquet_files"]
else:
logger.debug(f"Parquet export for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Parquet export for {dataset} is available but outdated (revision='{parquet_data_files_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the parquet export doesn't exist
logger.debug(f"No parquet export for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported Parquet files available.")
def get_exported_dataset_infos(
dataset: str, revision: str, token: Optional[Union[str, bool]]
) -> Dict[str, Dict[str, Any]]:
"""
Get the dataset information, which can be useful to get e.g. the dataset features.
Docs: https://huggingface.co/docs/datasets-server/info
"""
dataset_viewer_info_url = config.HF_ENDPOINT.replace("://", "://datasets-server.") + "/info?dataset="
try:
info_response = get_session().get(
url=dataset_viewer_info_url + dataset,
headers=get_authentication_headers_for_url(config.HF_ENDPOINT + f"datasets/{dataset}", token=token),
timeout=100.0,
)
info_response.raise_for_status()
if "X-Revision" in info_response.headers:
if info_response.headers["X-Revision"] == revision or revision is None:
info_response = info_response.json()
if (
info_response.get("partial") is False
and not info_response.get("pending", True)
and not info_response.get("failed", True)
and "dataset_info" in info_response
):
return info_response["dataset_info"]
else:
logger.debug(f"Dataset info for {dataset} is not completely ready yet.")
else:
logger.debug(
f"Dataset info for {dataset} is available but outdated (revision='{info_response.headers['X-Revision']}')"
)
except Exception as e: # noqa catch any exception of the dataset viewer API and consider the dataset info doesn't exist
logger.debug(f"No dataset info for {dataset} available ({type(e).__name__}: {e})")
raise DatasetViewerError("No exported dataset infos available.")
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an image tensor. Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to
[`ImageBytes`][docarray.typing.ImageBytes] which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from typing import TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.image.abstract_image_tensor import AbstractImageTensor
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor, metaTensorFlow
T = TypeVar('T', bound='ImageTensorFlowTensor')
@_register_proto(proto_type_name='image_tensorflow_tensor')
class ImageTensorFlowTensor(
TensorFlowTensor, AbstractImageTensor, metaclass=metaTensorFlow
):
"""
Subclass of [`TensorFlowTensor`][docarray.typing.TensorFlowTensor],
to represent an image tensor. Adds image-specific features to the tensor.
For instance, the ability to convert the tensor back to image bytes which are
optimized to send over the wire.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import ImageBytes, ImageTensorFlowTensor, ImageUrl
class MyImageDoc(BaseDoc):
title: str
tensor: Optional[ImageTensorFlowTensor]
url: Optional[ImageUrl]
bytes: Optional[ImageBytes]
doc = MyImageDoc(
title='my_second_image_doc',
url="https://upload.wikimedia.org/wikipedia/commons/8/80/"
"Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg",
)
doc.tensor = doc.url.load()
doc.bytes = doc.tensor.to_bytes()
```
---
"""
...
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librilight_limited import LibriLightLimited
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"LibriLightLimited",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
]
|
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .commonvoice import COMMONVOICE
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .librimix import LibriMix
from .librispeech import LIBRISPEECH
from .libritts import LIBRITTS
from .ljspeech import LJSPEECH
from .quesst14 import QUESST14
from .speechcommands import SPEECHCOMMANDS
from .tedlium import TEDLIUM
from .vctk import VCTK_092
from .yesno import YESNO
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
"QUESST14",
]
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`VideoDoc.url`), an Audio Document
(`VideoDoc.audio`), a VideoTensor (`VideoDoc.tensor`), an AnyTensor representing
the indices of the video's key frames (`VideoDoc.key_frame_indices`) and an
AnyEmbedding (`VideoDoc.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = TextDoc(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDocument):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
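# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# `validate` coerces plain values into a VideoDoc, e.g. when VideoDoc is used as a field
# type on another document; the URL and array shape below are assumed example values.
#
# doc_from_url = VideoDoc.validate('https://example.com/video.mp4')    # -> VideoDoc(url=...)
# doc_from_tensor = VideoDoc.validate(np.zeros((10, 224, 224, 3)))     # -> VideoDoc(tensor=...)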
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load().video
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes = mmdoc.video.url.load_bytes()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available
and isinstance(value, torch.Tensor)
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'milvus': [
'pymilvus~=2.1.0',
],
'opensearch': ['opensearch-py==2.0.1'],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'pymilvus==2.1.3',
'opensearch-py==2.0.1',
'jina',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='DocArray team',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.24.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'grpcio>=1.46.0,<1.48.1',
'grpcio-reflection>=1.46.0,<1.48.1',
'grpcio-health-checking>=1.46.0,<1.48.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.10.3',
],
'annlite': [
'annlite',
],
'weaviate': [
'weaviate-client~=3.9.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'milvus': [
'pymilvus~=2.1.0',
],
'benchmark': [
'pandas',
'matplotlib',
'seaborn',
'h5py',
],
'test': [
'protobuf>=3.13.0,<=3.20.0', # pip dependency resolution does not respect this restriction from paddle
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.9.0',
'annlite',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'pymilvus==2.1.3',
'jina',
'pytest-mock',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import AspectRatioBatchSampler, ClassAwareSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'ConcatDataset', 'RepeatDataset', 'ClassBalancedDataset',
'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset',
'replace_ImageToTensor', 'get_loading_pipeline', 'NumClassCheckHook',
'CocoPanopticDataset', 'MultiImageMixDataset', 'OpenImagesDataset',
'OpenImagesChallengeDataset', 'AspectRatioBatchSampler',
'ClassAwareSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_panoptic import CocoPanopticDataset
from .custom import CustomDataset
from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
MultiImageMixDataset, RepeatDataset)
from .deepfashion import DeepFashionDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
from .utils import (NumClassCheckHook, get_loading_pipeline,
replace_ImageToTensor)
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset',
'OpenImagesDataset', 'OpenImagesChallengeDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (aligned_bilinear, center_of_mass, empty_instances,
filter_gt_instances, filter_scores_and_topk, flip_tensor,
generate_coordinate, images_to_levels, interpolate_as,
levels_to_images, mask2ndarray, multi_apply,
relative_coordinate_maps, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .make_divisible import make_divisible
from .misc import (center_of_mass, empty_instances, filter_gt_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
images_to_levels, interpolate_as, levels_to_images,
mask2ndarray, multi_apply, rename_loss_dict,
reweight_loss_dict, samplelist_boxtype2tensor,
select_single_mlvl, sigmoid_geometric_mean, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict'
]
|
from jina.serve.runtimes.gateway.http.fastapi import FastAPIBaseGateway
__all__ = ['HTTPGateway']
class HTTPGateway(FastAPIBaseGateway):
"""
:class:`HTTPGateway` is a FastAPIBaseGateway that uses the default FastAPI app
"""
@property
def app(self):
"""Get the default base API app for HTTPGateway
:return: Return a FastAPI app for the default HTTPGateway
"""
return self._request_handler._http_fastapi_default_app(title=self.title,
description=self.description,
no_crud_endpoints=self.no_crud_endpoints,
no_debug_endpoints=self.no_debug_endpoints,
expose_endpoints=self.expose_endpoints,
expose_graphql_endpoint=self.expose_graphql_endpoint,
tracing=self.tracing,
tracer_provider=self.tracer_provider,
cors=self.cors)
|
from jina.serve.runtimes.gateway.http.gateway import HTTPGateway
__all__ = ['HTTPGateway']
|
import json
import os
import zlib
from typing import Callable, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
text_bytes = text.encode("utf-8")
return len(text_bytes) / len(zlib.compress(text_bytes))
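# Note (added for clarity): a higher ratio means the text compresses well, i.e. is more
# repetitive; elsewhere in Whisper this serves as a heuristic to flag degenerate output.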
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
class ResultWriter:
extension: str
def __init__(self, output_dir: str):
self.output_dir = output_dir
def __call__(self, result: dict, audio_path: str):
audio_basename = os.path.basename(audio_path)
output_path = os.path.join(self.output_dir, audio_basename + "." + self.extension)
with open(output_path, "w", encoding="utf-8") as f:
self.write_result(result, file=f)
def write_result(self, result: dict, file: TextIO):
raise NotImplementedError
class WriteTXT(ResultWriter):
extension: str = "txt"
def write_result(self, result: dict, file: TextIO):
for segment in result["segments"]:
print(segment['text'].strip(), file=file, flush=True)
class WriteVTT(ResultWriter):
extension: str = "vtt"
def write_result(self, result: dict, file: TextIO):
print("WEBVTT\n", file=file)
for segment in result["segments"]:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteSRT(ResultWriter):
extension: str = "srt"
def write_result(self, result: dict, file: TextIO):
for i, segment in enumerate(result["segments"], start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
class WriteJSON(ResultWriter):
extension: str = "json"
def write_result(self, result: dict, file: TextIO):
json.dump(result, file)
def get_writer(output_format: str, output_dir: str) -> Callable[[dict, TextIO], None]:
writers = {
"txt": WriteTXT,
"vtt": WriteVTT,
"srt": WriteSRT,
"json": WriteJSON,
}
if output_format == "all":
all_writers = [writer(output_dir) for writer in writers.values()]
def write_all(result: dict, file: TextIO):
for writer in all_writers:
writer(result, file)
return write_all
return writers[output_format](output_dir)
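# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# `result` is assumed to be a transcription result dict with a "segments" list, and
# "audio.mp3" / "." are example values.
#
# writer = get_writer("srt", output_dir=".")
# writer(result, "audio.mp3")                    # writes ./audio.mp3.srt
# get_writer("all", ".")(result, "audio.mp3")    # writes .txt, .vtt, .srt and .json files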
|
import zlib
from typing import Iterator, TextIO
def exact_div(x, y):
assert x % y == 0
return x // y
def str2bool(string):
str2val = {"True": True, "False": False}
if string in str2val:
return str2val[string]
else:
raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
def optional_int(string):
return None if string == "None" else int(string)
def optional_float(string):
return None if string == "None" else float(string)
def compression_ratio(text) -> float:
text_bytes = text.encode("utf-8")
return len(text_bytes) / len(zlib.compress(text_bytes))
def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = '.'):
assert seconds >= 0, "non-negative timestamp expected"
milliseconds = round(seconds * 1000.0)
hours = milliseconds // 3_600_000
milliseconds -= hours * 3_600_000
minutes = milliseconds // 60_000
milliseconds -= minutes * 60_000
seconds = milliseconds // 1_000
milliseconds -= seconds * 1_000
hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
def write_txt(transcript: Iterator[dict], file: TextIO):
for segment in transcript:
print(segment['text'].strip(), file=file, flush=True)
def write_vtt(transcript: Iterator[dict], file: TextIO):
print("WEBVTT\n", file=file)
for segment in transcript:
print(
f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
def write_srt(transcript: Iterator[dict], file: TextIO):
"""
Write a transcript to a file in SRT format.
Example usage:
from pathlib import Path
from whisper.utils import write_srt
result = transcribe(model, audio_path, temperature=temperature, **args)
# save SRT
audio_basename = Path(audio_path).stem
with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
write_srt(result["segments"], file=srt)
"""
for i, segment in enumerate(transcript, start=1):
# write srt lines
print(
f"{i}\n"
f"{format_timestamp(segment['start'], always_include_hours=True, decimal_marker=',')} --> "
f"{format_timestamp(segment['end'], always_include_hours=True, decimal_marker=',')}\n"
f"{segment['text'].strip().replace('-->', '->')}\n",
file=file,
flush=True,
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_element import BaseDataElement
DATA_BATCH = Sequence[Tuple[Any, BaseDataElement]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
# The seed of each worker equals
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
nothing but return ``data_batch``.
Args:
data_batch (Sequence[Tuple[Any, BaseDataElement]]): Batch of data from
dataloader.
Returns:
Sequence[Tuple[Any, BaseDataElement]]: Return input ``data_batch``.
"""
return data_batch
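# --- Hedged usage sketch (added for illustration; not part of the original module). ---
# Shows how these helpers are typically wired into a torch DataLoader; `my_dataset`,
# the worker/rank/seed numbers and the batch size are assumed example values.
#
# from functools import partial
# from torch.utils.data import DataLoader
#
# loader = DataLoader(
#     my_dataset,
#     batch_size=2,
#     num_workers=2,
#     collate_fn=pseudo_collate,  # keep samples as-is instead of merging them into tensors
#     worker_init_fn=partial(worker_init_fn, num_workers=2, rank=0, seed=42),
# )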
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import Any, Sequence, Tuple
import numpy as np
import torch
from .base_data_sample import BaseDataSample
DATA_BATCH = Sequence[Tuple[Any, BaseDataSample]]
def worker_init_fn(worker_id: int, num_workers: int, rank: int,
seed: int) -> None:
"""This function will be called on each worker subprocess after seeding and
before data loading.
Args:
worker_id (int): Worker id in [0, num_workers - 1].
num_workers (int): How many subprocesses to use for data loading.
rank (int): Rank of process in distributed environment. If in
non-distributed environment, it is a constant number `0`.
seed (int): Random seed.
"""
# The seed of each worker equals
# num_worker * rank + worker_id + user_seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
def pseudo_collate(data_batch: DATA_BATCH) -> DATA_BATCH:
"""The default behavior of dataloader is to merge a list of samples to form
a mini-batch of Tensor(s). However, in MMEngine, ``pseudo_collate`` does
nothing but return ``data_batch``.
Args:
data_batch (Sequence[Tuple[Any, BaseDataSample]]): Batch of data from
dataloader.
Returns:
Sequence[Tuple[Any, BaseDataSample]]: Return input ``data_batch``.
"""
return data_batch
|
_base_ = '../faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]))
|
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(plugins=[
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]))
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.77%
Accuracy@3: 64.40%
Accuracy@5: 66.87%
Accuracy@10: 71.83%
Precision@1: 50.77%
Precision@3: 40.45%
Precision@5: 34.06%
Precision@10: 25.98%
Recall@1: 6.27%
Recall@3: 11.69%
Recall@5: 13.74%
Recall@10: 17.23%
MRR@10: 0.5814
NDCG@10: 0.3621
MAP@100: 0.1838
Model Query Sparsity: Active Dimensions: 40.0, Sparsity Ratio: 0.9987
Model Corpus Sparsity: Active Dimensions: 206.2, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3621
|
import logging
import random
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseInformationRetrievalEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load the NFcorpus IR dataset (https://huggingface.co/datasets/BeIR/nfcorpus, https://huggingface.co/datasets/BeIR/nfcorpus-qrels)
corpus = load_dataset("BeIR/nfcorpus", "corpus", split="corpus")
queries = load_dataset("BeIR/nfcorpus", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/nfcorpus-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 1,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=1000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set([relevant_cids]))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-nfcorpus-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
"""
Queries: 323
Corpus: 3269
Score-Function: dot
Accuracy@1: 50.46%
Accuracy@3: 64.40%
Accuracy@5: 67.49%
Accuracy@10: 72.14%
Precision@1: 50.46%
Precision@3: 40.87%
Precision@5: 34.12%
Precision@10: 26.10%
Recall@1: 6.11%
Recall@3: 11.73%
Recall@5: 13.64%
Recall@10: 17.24%
MRR@10: 0.5801
NDCG@10: 0.3626
MAP@100: 0.1832
Model Query Sparsity: Active Dimensions: 43.1, Sparsity Ratio: 0.9986
Model Corpus Sparsity: Active Dimensions: 207.0, Sparsity Ratio: 0.9932
"""
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
# => Primary metric: BeIR-nfcorpus-subset-test_dot_ndcg@10
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.3626
|
import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'dtype,result_type',
[
('int64', 'int64'),
('float64', 'float64'),
('int8', 'int8'),
('double', 'float64'),
],
)
def test_dtype(dtype, result_type):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == result_type
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
import numpy as np
import pytest
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.computation.tensorflow_backend import TensorFlowCompBackend
from docarray.typing import TensorFlowTensor
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((5), 1),
((1, 5), 2),
((5, 5), 2),
((), 0),
],
)
def test_n_dim(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
assert TensorFlowCompBackend.n_dim(array) == result
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'shape,result',
[
((10,), (10,)),
((5, 5), (5, 5)),
((), ()),
],
)
def test_shape(shape, result):
array = TensorFlowTensor(tf.zeros(shape))
shape = TensorFlowCompBackend.shape(array)
assert shape == result
assert type(shape) == tuple
@pytest.mark.tensorflow
def test_to_device():
array = TensorFlowTensor(tf.constant([1, 2, 3]))
array = TensorFlowCompBackend.to_device(array, 'CPU:0')
assert array.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
@pytest.mark.parametrize('dtype', ['int64', 'float64', 'int8', 'double'])
def test_dtype(dtype):
array = TensorFlowTensor(tf.constant([1, 2, 3], dtype=getattr(tf, dtype)))
assert TensorFlowCompBackend.dtype(array) == dtype
@pytest.mark.tensorflow
def test_empty():
array = TensorFlowCompBackend.empty((10, 3))
assert array.tensor.shape == (10, 3)
@pytest.mark.tensorflow
def test_empty_dtype():
tf_tensor = TensorFlowCompBackend.empty((10, 3), dtype=tf.int32)
assert tf_tensor.tensor.shape == (10, 3)
assert tf_tensor.tensor.dtype == tf.int32
@pytest.mark.tensorflow
def test_empty_device():
tensor = TensorFlowCompBackend.empty((10, 3), device='CPU:0')
assert tensor.tensor.shape == (10, 3)
assert tensor.tensor.device.endswith('CPU:0')
@pytest.mark.tensorflow
def test_squeeze():
tensor = TensorFlowTensor(tf.zeros(shape=(1, 1, 3, 1)))
squeezed = TensorFlowCompBackend.squeeze(tensor)
assert squeezed.tensor.shape == (3,)
@pytest.mark.tensorflow
@pytest.mark.parametrize(
'data_input,t_range,x_range,data_result',
[
(
[0, 1, 2, 3, 4, 5],
(0, 10),
None,
[0, 2, 4, 6, 8, 10],
),
(
[0, 1, 2, 3, 4, 5],
(0, 10),
(0, 10),
[0, 1, 2, 3, 4, 5],
),
(
[[0.0, 1.0], [0.0, 1.0]],
(0, 10),
None,
[[0.0, 10.0], [0.0, 10.0]],
),
],
)
def test_minmax_normalize(data_input, t_range, x_range, data_result):
array = TensorFlowTensor(tf.constant(data_input))
output = TensorFlowCompBackend.minmax_normalize(
tensor=array, t_range=t_range, x_range=x_range
)
assert np.allclose(output.tensor, tf.constant(data_result))
@pytest.mark.tensorflow
def test_reshape():
tensor = TensorFlowTensor(tf.zeros((3, 224, 224)))
reshaped = TensorFlowCompBackend.reshape(tensor, (224, 224, 3))
assert reshaped.tensor.shape == (224, 224, 3)
@pytest.mark.tensorflow
def test_stack():
t0 = TensorFlowTensor(tf.zeros((3, 224, 224)))
t1 = TensorFlowTensor(tf.ones((3, 224, 224)))
stacked1 = TensorFlowCompBackend.stack([t0, t1], dim=0)
assert isinstance(stacked1, TensorFlowTensor)
assert stacked1.tensor.shape == (2, 3, 224, 224)
stacked2 = TensorFlowCompBackend.stack([t0, t1], dim=-1)
assert isinstance(stacked2, TensorFlowTensor)
assert stacked2.tensor.shape == (3, 224, 224, 2)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_internvl import *
from .modeling_internvl import *
from .processing_internvl import *
from .video_processing_internvl import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_internvl import *
from .modeling_internvl import *
from .processing_internvl import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
),
)
return cost
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(loop_scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# Set the month to 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(loop_scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
),
)
return cost
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
# Set the month to 2 while keeping the rest of the current timestamp
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
"""
This example trains a SparseEncoder for the Natural Questions (NQ) dataset.
The training script fine-tunes a SparseEncoder using the Splade loss function for retrieval.
It loads a subset of the Natural Questions dataset, splits it into training and evaluation subsets,
and trains the model as a retriever. After training, the model is evaluated and saved locally,
with an optional step to push the trained model to the Hugging Face Hub.
Usage:
python train_splade_nq.py
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
# 1a. Load a model to finetune with 1b. (Optional) model card data
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Natural Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
print("Model max length:", model.max_seq_length)
# 2. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss.
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator
evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-nq"
training_args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=2e-5,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, useful to include these in the model card
evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
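# --- Minimal inference sketch (illustrative, not part of the original script) ---
# Assumes training above completed and the model was written to the path below,
# which mirrors the run_name logic in main(); the query and passages are made up.
def example_inference():
    model = SparseEncoder("models/splade-distilbert-base-uncased-nq/final")
    query_embeddings = model.encode(["who composed the opera carmen"])
    document_embeddings = model.encode(
        [
            "Georges Bizet composed the opera Carmen.",
            "The Amazon rainforest spans several South American countries.",
        ]
    )
    # Dot-product relevance scores; the Carmen passage should score higher.
    print(model.similarity(query_embeddings, document_embeddings))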
|
"""
This example trains a SparseEncoder for the Natural Questions (NQ) task.
The training script fine-tunes a SparseEncoder using the Splade loss function for retrieval.
It loads a subset of the Natural Questions dataset, splits it into training and evaluation subsets,
and trains the model as a retriever. After training, the model is evaluated and saved locally,
with an optional step to push the trained model to the Hugging Face Hub.
Usage:
python train_splade_nq.py
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
from sentence_transformers.training_args import BatchSamplers
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
# 1a. Load a model to finetune with 1b. (Optional) model card data
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Natural Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
print("Model max length:", model.max_seq_length)
# 2. Load the NQ dataset: https://huggingface.co/datasets/sentence-transformers/natural-questions
logging.info("Read the Natural Questions training dataset")
full_dataset = load_dataset("sentence-transformers/natural-questions", split="train").select(range(100_000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss.
lambda_query = 5e-5
lambda_corpus = 3e-5
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator
evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-nq"
training_args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=2e-5,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, useful to include these in the model card
evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) from scratch for the STS benchmark. It generates sentence embeddings
that can be compared using cosine similarity to measure semantic similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
evaluation_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
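# --- Minimal usage sketch (illustrative, not part of the original script) ---
# Assumes the model above was saved to final_output_dir; the two sentences are made up.
def example_similarity():
    trained = SentenceTransformer(final_output_dir)
    embeddings = trained.encode(
        [
            "A man is playing a guitar.",
            "Someone is strumming a guitar.",
        ]
    )
    # 2x2 similarity matrix (cosine by default for SentenceTransformer models).
    print(trained.similarity(embeddings, embeddings))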
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) from scratch for the STS benchmark. It generates sentence embeddings
that can be compared using cosine similarity to measure semantic similarity.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
train_batch_size = 16
num_epochs = 4
output_dir = (
"output/training_stsbenchmark_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CosineSimilarityLoss (https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) needs two text columns and one
# similarity score column (between 0 and 1)
train_loss = losses.CosineSimilarityLoss(model=model)
# train_loss = losses.CoSENTLoss(model=model)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts')`."
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .optimizers import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import * # noqa: F401, F403
from .bbox import * # noqa: F401, F403
from .data_structures import * # noqa: F401, F403
from .evaluation import * # noqa: F401, F403
from .hook import * # noqa: F401, F403
from .mask import * # noqa: F401, F403
from .post_processing import * # noqa: F401, F403
from .utils import * # noqa: F401, F403
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""Convert a PIL Image to a tensor of the same type - this does not scale values.
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""Convert a tensor or an ndarray to PIL Image
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""Convert all TVTensors to pure tensors, removing associated metadata (if any).
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
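# --- Minimal usage sketch (illustrative, not part of the torchvision source) ---
# Round-trips a small, made-up PIL image through the transforms defined above.
def example_usage():
    pil_img = PIL.Image.new("RGB", (4, 4), color=(255, 0, 0))
    tensor = PILToTensor()(pil_img)    # uint8 tensor of shape (3, 4, 4), values unscaled
    image = ToImage()(tensor)          # tv_tensors.Image wrapping the same data
    back_to_pil = ToPILImage()(image)  # back to a PIL.Image.Image
    plain = ToPureTensor()(image)      # plain torch.Tensor without TVTensor metadata
    return tensor.shape, type(back_to_pil), type(plain)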
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import functional as F, Transform
from torchvision.transforms.v2._utils import is_pure_tensor
class PILToTensor(Transform):
"""[BETA] Convert a PIL Image to a tensor of the same type - this does not scale values.
.. v2betastatus:: PILToTensor transform
This transform does not support torchscript.
Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
"""
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: PIL.Image.Image, params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImage(Transform):
"""[BETA] Convert a tensor, ndarray, or PIL Image to :class:`~torchvision.tv_tensors.Image`
; this does not scale values.
.. v2betastatus:: ToImage transform
This transform does not support torchscript.
"""
_transformed_types = (is_pure_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> tv_tensors.Image:
return F.to_image(inpt)
class ToPILImage(Transform):
"""[BETA] Convert a tensor or an ndarray to PIL Image
.. v2betastatus:: ToPILImage transform
This transform does not support torchscript.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL Image while adjusting the value range depending on the ``mode``.
Args:
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
- If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
- If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
- If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
            - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
``short``).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
"""
_transformed_types = (is_pure_tensor, tv_tensors.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_pil_image(inpt, mode=self.mode)
class ToPureTensor(Transform):
"""[BETA] Convert all TVTensors to pure tensors, removing associated metadata (if any).
.. v2betastatus:: ToPureTensor transform
This doesn't scale or change the values, only the type.
"""
_transformed_types = (tv_tensors.TVTensor,)
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
return inpt.as_subclass(torch.Tensor)
|
from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super().__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path) -> None:
pass
@staticmethod
def load(input_path) -> Normalize:
return Normalize()
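# --- Minimal usage sketch (illustrative, not part of the module) ---
# Appends Normalize as the last module of a SentenceTransformer pipeline so that
# encode() returns unit-length embeddings; the base model name is only an example.
def example_usage():
    from sentence_transformers import SentenceTransformer, models

    word_embedding = models.Transformer("distilbert-base-uncased")
    pooling = models.Pooling(word_embedding.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word_embedding, pooling, Normalize()])
    return model.encode(["normalize me"])  # each row has L2 norm ~= 1.0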
|
from __future__ import annotations
import torch.nn.functional as F
from torch import Tensor, nn
class Normalize(nn.Module):
"""This layer normalizes embeddings to unit length"""
def __init__(self) -> None:
super(Normalize, self).__init__()
def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
features.update({"sentence_embedding": F.normalize(features["sentence_embedding"], p=2, dim=1)})
return features
def save(self, output_path) -> None:
pass
@staticmethod
def load(input_path) -> "Normalize":
return Normalize()
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> List[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
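# --- Minimal usage sketch (illustrative, not part of the module) ---
# Builds a tiny, made-up vocabulary and turns two sentences into term-frequency
# vectors using the class defined above.
def example_usage():
    bow = BoW(vocab=["the", "cat", "sat", "on", "mat"])
    features = bow.tokenize(["the cat sat", "the cat sat on the mat"])
    # One row per sentence, one column per (deduplicated) vocab entry, counts as float32.
    return features["sentence_embedding"]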
|
import torch
from torch import Tensor
from torch import nn
from typing import List, Dict
import os
import json
import logging
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> List[int]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import STFTInitializer
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
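# --- Minimal usage sketch (illustrative, not part of the autogenerated file) ---
# The re-exported initializer classes are callable with a shape, and the lowercase
# aliases resolve through get(); the shape and seed here are arbitrary examples.
def example_usage():
    initializer = GlorotUniform(seed=0)
    weights = initializer(shape=(3, 3))  # 3x3 tensor drawn from the Glorot uniform range
    same_kind = get("glorot_uniform")    # string lookup returns an equivalent initializer
    return weights, type(same_kind)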
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize
from keras.src.initializers import get
from keras.src.initializers import serialize
from keras.src.initializers.constant_initializers import Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer
from keras.src.initializers.random_initializers import GlorotNormal
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import GlorotUniform
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import LecunNormal
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import LecunUniform
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import OrthogonalInitializer
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as Orthogonal,
)
from keras.src.initializers.random_initializers import (
OrthogonalInitializer as orthogonal,
)
from keras.src.initializers.random_initializers import RandomNormal
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import RandomUniform
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import TruncatedNormal
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import VarianceScaling
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
|
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Loads list of documents from PDF file and also accepts extra information in dict format."""
return self.load(file_path, metadata=metadata, extra_info=extra_info)
def load(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
        Load a list of documents from a PDF file; also accepts extra information in dict format.
Args:
file_path (Union[Path, str]): file path of PDF file (accepts string or Path).
metadata (bool, optional): if metadata to be included or not. Defaults to True.
extra_info (Optional[Dict], optional): extra information related to each document in dict format. Defaults to None.
Raises:
TypeError: if extra_info is not a dictionary.
TypeError: if file_path is not a string or Path.
Returns:
List[Document]: list of documents.
"""
import fitz
# check if file_path is a string or Path
if not isinstance(file_path, str) and not isinstance(file_path, Path):
raise TypeError("file_path must be a string or Path.")
# open PDF file
doc = fitz.open(file_path)
# if extra_info is not None, check if it is a dictionary
if extra_info:
if not isinstance(extra_info, dict):
raise TypeError("extra_info must be a dictionary.")
# if metadata is True, add metadata to each document
if metadata:
if not extra_info:
extra_info = {}
extra_info["total_pages"] = len(doc)
extra_info["file_path"] = str(file_path)
# return list of documents
return [
Document(
text=page.get_text().encode("utf-8"),
extra_info=dict(
extra_info,
**{
"source": f"{page.number+1}",
},
),
)
for page in doc
]
else:
return [
Document(
text=page.get_text().encode("utf-8"), extra_info=extra_info or {}
)
for page in doc
]
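# --- Minimal usage sketch (illustrative, not part of the reader module) ---
# "example.pdf" is a hypothetical path; with metadata=True each Document carries
# the source page number, total page count, and file path.
def example_usage():
    reader = PyMuPDFReader()
    documents = reader.load_data("example.pdf", metadata=True)
    return [(doc.metadata.get("source"), len(doc.text)) for doc in documents]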
|
"""Read PDF files using PyMuPDF library."""
from pathlib import Path
from typing import Dict, List, Optional, Union
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PyMuPDFReader(BaseReader):
"""Read PDF files using PyMuPDF library."""
def load_data(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Loads list of documents from PDF file and also accepts extra information in dict format."""
return self.load(file_path, metadata=metadata, extra_info=extra_info)
def load(
self,
file_path: Union[Path, str],
metadata: bool = True,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""Loads list of documents from PDF file and also accepts extra information in dict format.
Args:
file_path (Union[Path, str]): file path of PDF file (accepts string or Path).
metadata (bool, optional): if metadata to be included or not. Defaults to True.
extra_info (Optional[Dict], optional): extra information related to each document in dict format. Defaults to None.
Raises:
TypeError: if extra_info is not a dictionary.
TypeError: if file_path is not a string or Path.
Returns:
List[Document]: list of documents.
"""
import fitz
# check if file_path is a string or Path
if not isinstance(file_path, str) and not isinstance(file_path, Path):
raise TypeError("file_path must be a string or Path.")
# open PDF file
doc = fitz.open(file_path)
# if extra_info is not None, check if it is a dictionary
if extra_info:
if not isinstance(extra_info, dict):
raise TypeError("extra_info must be a dictionary.")
# if metadata is True, add metadata to each document
if metadata:
if not extra_info:
extra_info = {}
extra_info["total_pages"] = len(doc)
extra_info["file_path"] = str(file_path)
# return list of documents
return [
Document(
text=page.get_text().encode("utf-8"),
extra_info=dict(
extra_info,
**{
"source": f"{page.number+1}",
},
),
)
for page in doc
]
else:
return [
Document(
text=page.get_text().encode("utf-8"), extra_info=extra_info or {}
)
for page in doc
]
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if left_square < left_brace and left_square != -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Regular expression pattern to match code within triple backticks with
# a Python marker. Like: ```python df.columns```
python_str_pattern = re.compile(r"^```python", re.IGNORECASE)
text = python_str_pattern.sub("```", text)
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
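# --- Minimal usage sketch (illustrative, not part of the module) ---
# Made-up LLM outputs run through the helpers above.
def example_usage():
    markdown_json = 'Here is the result:\n```json\n{"answer": 42,}\n```'
    parsed = parse_json_markdown(markdown_json)  # trailing comma tolerated via the yaml fallback
    code = parse_code_markdown("```python\nprint('hi')\n```", only_last=True)
    raw = extract_json_str('noise before {"key": "value"} noise after')
    return parsed, code, raw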
|
import contextlib
import json
import re
from typing import Any, List
with contextlib.suppress(ImportError):
import yaml
from llama_index.core.output_parsers.base import OutputParserException
def _marshal_llm_to_json(output: str) -> str:
"""
Extract a substring containing valid JSON or array from a string.
Args:
output: A string that may contain a valid JSON object or array surrounded by
extraneous characters or information.
Returns:
A string containing a valid JSON object or array.
"""
output = output.strip()
left_square = output.find("[")
left_brace = output.find("{")
if left_square < left_brace and left_square != -1:
left = left_square
right = output.rfind("]")
else:
left = left_brace
right = output.rfind("}")
return output[left : right + 1]
def parse_json_markdown(text: str) -> Any:
if "```json" in text:
text = text.split("```json")[1].strip().strip("```").strip()
json_string = _marshal_llm_to_json(text)
try:
json_obj = json.loads(json_string)
except json.JSONDecodeError as e_json:
try:
# NOTE: parsing again with pyyaml
# pyyaml is less strict, and allows for trailing commas
# right now we rely on this since guidance program generates
# trailing commas
json_obj = yaml.safe_load(json_string)
except yaml.YAMLError as e_yaml:
raise OutputParserException(
f"Got invalid JSON object. Error: {e_json} {e_yaml}. "
f"Got JSON string: {json_string}"
)
except NameError as exc:
raise ImportError("Please pip install PyYAML.") from exc
return json_obj
def parse_code_markdown(text: str, only_last: bool) -> List[str]:
# Regular expression pattern to match code within triple-backticks
pattern = r"```(.*?)```"
# Find all matches of the pattern in the text
matches = re.findall(pattern, text, re.DOTALL)
# Return the last matched group if requested
code = matches[-1] if matches and only_last else matches
# If empty we optimistically assume the output is the code
if not code:
# we want to handle cases where the code may start or end with triple
# backticks
# we also want to handle cases where the code is surrounded by regular
# quotes
# we can't just remove all backticks due to JS template strings
candidate = text.strip()
if candidate.startswith('"') and candidate.endswith('"'):
candidate = candidate[1:-1]
if candidate.startswith("'") and candidate.endswith("'"):
candidate = candidate[1:-1]
if candidate.startswith("`") and candidate.endswith("`"):
candidate = candidate[1:-1]
# For triple backticks we split the handling of the start and end
# partly because there can be cases where only one and not the other
# is present, and partly because we don't need to be so worried
# about it being a string in a programming language
if candidate.startswith("```"):
candidate = re.sub(r"^```[a-zA-Z]*", "", candidate)
if candidate.endswith("```"):
candidate = candidate[:-3]
code = [candidate.strip()]
return code
def extract_json_str(text: str) -> str:
"""Extract JSON string from text."""
# NOTE: this regex parsing is taken from langchain.output_parsers.pydantic
match = re.search(r"\{.*\}", text.strip(), re.MULTILINE | re.IGNORECASE | re.DOTALL)
if not match:
raise ValueError(f"Could not extract json string from output: {text}")
return match.group()
|
"""Test LLM program."""
import json
import pytest
from typing import Sequence
from unittest.mock import MagicMock
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.llms import LLMMetadata
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.llms import ImageBlock, ChatResponse, ChatMessage
@pytest.fixture()
def image_url() -> str:
return "https://astrabert.github.io/hophop-science/images/whale_doing_science.png"
class MagicLLM(MagicMock):
def chat(self, messages: Sequence[ChatMessage]) -> ChatResponse:
test_object = {"hello": "world"}
text = json.dumps(test_object)
return ChatResponse(message=ChatMessage(role="assistant", content=text))
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata()
class TestModel(BaseModel):
__test__ = False
hello: str
def test_multi_modal_llm_program(image_url: str) -> None:
"""Test Multi Modal LLM Pydantic program."""
output_parser = PydanticOutputParser(output_cls=TestModel)
multi_modal_llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str="This is a test prompt with a {test_input}.",
multi_modal_llm=MagicLLM(),
image_documents=[ImageBlock(url=image_url)],
)
# mock Multi Modal llm
obj_output = multi_modal_llm_program(test_input="hello")
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "world"
|
"""Test LLM program."""
import json
from typing import Sequence
from unittest.mock import MagicMock
from llama_index.core.base.llms.types import (
CompletionResponse,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.multi_modal_llms import MultiModalLLMMetadata
from llama_index.core.output_parsers.pydantic import PydanticOutputParser
from llama_index.core.program import MultiModalLLMCompletionProgram
from llama_index.core.schema import ImageDocument
class MockMultiModalLLM(MagicMock):
def complete(
self, prompt: str, image_documents: Sequence[ImageDocument]
) -> CompletionResponse:
test_object = {"hello": "world"}
text = json.dumps(test_object)
return CompletionResponse(text=text)
@property
def metadata(self) -> MultiModalLLMMetadata:
return MultiModalLLMMetadata()
class TestModel(BaseModel):
__test__ = False
hello: str
def test_multi_modal_llm_program() -> None:
"""Test Multi Modal LLM Pydantic program."""
output_parser = PydanticOutputParser(output_cls=TestModel)
multi_modal_llm_program = MultiModalLLMCompletionProgram.from_defaults(
output_parser=output_parser,
prompt_template_str="This is a test prompt with a {test_input}.",
multi_modal_llm=MockMultiModalLLM(),
image_documents=[ImageDocument()],
)
# mock Multi Modal llm
obj_output = multi_modal_llm_program(test_input="hello")
assert isinstance(obj_output, TestModel)
assert obj_output.hello == "world"
|