| input (string, length 33–5k) | output (string, length 32–5k) |
|---|---|
from __future__ import annotations
import os
import platform
import subprocess
from .optional_submodules import checkout_nccl
from .setup_helpers.cmake import CMake, USE_NINJA
from .setup_helpers.env import (
check_env_flag,
check_negative_env_flag,
IS_64BIT,
IS_WINDOWS,
)
def _get_vc_env(vc_arch: str) -> dict[str, str]:
try:
from setuptools import distutils # type: ignore[import,attr-defined]
return distutils._msvccompiler._get_vc_env(vc_arch) # type: ignore[no-any-return]
except AttributeError:
from setuptools._distutils import (
_msvccompiler, # type: ignore[import,attr-defined]
)
return _msvccompiler._get_vc_env(vc_arch) # type: ignore[no-any-return,attr-defined]
def _overlay_windows_vcvars(env: dict[str, str]) -> dict[str, str]:
vc_arch = "x64" if IS_64BIT else "x86"
if platform.machine() == "ARM64":
vc_arch = "x64_arm64"
# First Win11 Windows on Arm build version that supports x64 emulation
# is 10.0.22000.
win11_1st_version = (10, 0, 22000)
current_win_version = tuple(
int(version_part) for version_part in platform.version().split(".")
)
if current_win_version < win11_1st_version:
vc_arch = "x86_arm64"
print(
"Warning: 32-bit toolchain will be used, but 64-bit linker "
"is recommended to avoid out-of-memory linker error!"
)
print(
"Warning: Please consider upgrading to Win11, where x64 "
"emulation is enabled!"
)
vc_env = _get_vc_env(vc_arch)
# Keys in `_get_vc_env` are always lowercase.
# We turn them into uppercase before overlaying vcvars
# because OS environ keys are always uppercase on Windows.
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
return vc_env
def _create_build_env() -> dict[str, str]:
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
if IS_WINDOWS and USE_NINJA:
# When using Ninja under Windows, the gcc toolchain will be chosen as
# default. But it should be set to MSVC as the user's first choice.
my_env = _overlay_windows_vcvars(my_env)
my_env.setdefault("CC", "cl")
my_env.setdefault("CXX", "cl")
return my_env
def build_pytorch(
version: str | None,
cmake_python_library: str | None,
build_python: bool,
rerun_cmake: bool,
cmake_only: bool,
cmake: CMake,
) -> None:
my_env = _create_build_env()
if (
not check_negative_env_flag("USE_CUDA")
and not check_negative_env_flag("USE_NCCL")
and not check_env_flag("USE_SYSTEM_NCCL")
):
checkout_nccl()
build_test = not check_negative_env_flag("BUILD_TEST")
cmake.generate(
version, cmake_python_library, build_python, build_test, my_env, rerun_cmake
)
if cmake_only:
return
build_custom_step = os.getenv("BUILD_CUSTOM_STEP")
if build_custom_step:
try:
output = subprocess.check_output(
build_custom_step,
shell=True,
stderr=subprocess.STDOUT,
text=True,
)
print("Command output:")
print(output)
except subprocess.CalledProcessError as e:
print("Command failed with return code:", e.returncode)
print("Output (stdout and stderr):")
print(e.output)
raise
cmake.build(my_env)
|
from __future__ import annotations
import os
import platform
from .optional_submodules import checkout_nccl
from .setup_helpers.cmake import CMake, USE_NINJA
from .setup_helpers.env import (
check_env_flag,
check_negative_env_flag,
IS_64BIT,
IS_WINDOWS,
)
def _get_vc_env(vc_arch: str) -> dict[str, str]:
try:
from setuptools import distutils # type: ignore[import,attr-defined]
return distutils._msvccompiler._get_vc_env(vc_arch) # type: ignore[no-any-return]
except AttributeError:
from setuptools._distutils import (
_msvccompiler, # type: ignore[import,attr-defined]
)
return _msvccompiler._get_vc_env(vc_arch) # type: ignore[no-any-return,attr-defined]
def _overlay_windows_vcvars(env: dict[str, str]) -> dict[str, str]:
vc_arch = "x64" if IS_64BIT else "x86"
if platform.machine() == "ARM64":
vc_arch = "x64_arm64"
# First Win11 Windows on Arm build version that supports x64 emulation
# is 10.0.22000.
win11_1st_version = (10, 0, 22000)
current_win_version = tuple(
int(version_part) for version_part in platform.version().split(".")
)
if current_win_version < win11_1st_version:
vc_arch = "x86_arm64"
print(
"Warning: 32-bit toolchain will be used, but 64-bit linker "
"is recommended to avoid out-of-memory linker error!"
)
print(
"Warning: Please consider upgrading to Win11, where x64 "
"emulation is enabled!"
)
vc_env = _get_vc_env(vc_arch)
# Keys in `_get_vc_env` are always lowercase.
# We turn them into uppercase before overlaying vcvars
# because OS environ keys are always uppercase on Windows.
# https://stackoverflow.com/a/7797329
vc_env = {k.upper(): v for k, v in vc_env.items()}
for k, v in env.items():
uk = k.upper()
if uk not in vc_env:
vc_env[uk] = v
return vc_env
def _create_build_env() -> dict[str, str]:
# XXX - our cmake file sometimes looks at the system environment
# and not cmake flags!
# you should NEVER add something to this list. It is bad practice to
# have cmake read the environment
my_env = os.environ.copy()
if IS_WINDOWS and USE_NINJA:
# When using Ninja under Windows, the gcc toolchain will be chosen as
# default. But it should be set to MSVC as the user's first choice.
my_env = _overlay_windows_vcvars(my_env)
my_env.setdefault("CC", "cl")
my_env.setdefault("CXX", "cl")
return my_env
def build_pytorch(
version: str | None,
cmake_python_library: str | None,
build_python: bool,
rerun_cmake: bool,
cmake_only: bool,
cmake: CMake,
) -> None:
my_env = _create_build_env()
if (
not check_negative_env_flag("USE_CUDA")
and not check_negative_env_flag("USE_NCCL")
and not check_env_flag("USE_SYSTEM_NCCL")
):
checkout_nccl()
build_test = not check_negative_env_flag("BUILD_TEST")
cmake.generate(
version, cmake_python_library, build_python, build_test, my_env, rerun_cmake
)
if cmake_only:
return
cmake.build(my_env)
|
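Both build listings above merge the MSVC vcvars environment over the existing one in `_overlay_windows_vcvars`. A minimal standalone sketch of that merge rule follows; the paths are made up for illustration.
# Toy illustration of the overlay rule: vcvars keys win, and existing
# environment keys are added only when absent, compared case-insensitively
# by upper-casing (OS environ keys are uppercase on Windows).
vc_env = {"path": r"C:\VS\bin", "include": r"C:\VS\include"}
os_env = {"PATH": r"C:\Windows", "TEMP": r"C:\Temp"}
vc_env = {k.upper(): v for k, v in vc_env.items()}
for k, v in os_env.items():
    uk = k.upper()
    if uk not in vc_env:
        vc_env[uk] = v
print(vc_env)  # {'PATH': 'C:\\VS\\bin', 'INCLUDE': 'C:\\VS\\include', 'TEMP': 'C:\\Temp'}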
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
yield
await backend.data.db.disconnect()
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(500, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"])
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
is_template=False,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_status(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`, :class:`~sentence_transformers.sampler.DefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.DefaultBatchSampler`]], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`, :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`, Callable[[...], :class:`~sentence_transformers.sampler.MultiDatasetDefaultBatchSampler`]], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
from __future__ import annotations
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
class CrossEncoderTrainingArguments(SentenceTransformerTrainingArguments):
"""
CrossEncoderTrainingArguments extends :class:`~transformers.TrainingArguments` with additional arguments
specific to Sentence Transformers. See :class:`~transformers.TrainingArguments` for the complete list of
available arguments.
Args:
output_dir (`str`):
The output directory where the model checkpoints will be written.
prompts (`Union[Dict[str, Dict[str, str]], Dict[str, str], str]`, *optional*):
The prompts to use for each column in the training, evaluation and test datasets. Four formats are accepted:
1. `str`: A single prompt to use for all columns in the datasets, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
2. `Dict[str, str]`: A dictionary mapping column names to prompts, regardless of whether the training/evaluation/test
datasets are :class:`datasets.Dataset` or a :class:`datasets.DatasetDict`.
3. `Dict[str, str]`: A dictionary mapping dataset names to prompts. This should only be used if your training/evaluation/test
datasets are a :class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
4. `Dict[str, Dict[str, str]]`: A dictionary mapping dataset names to dictionaries mapping column names to
prompts. This should only be used if your training/evaluation/test datasets are a
:class:`datasets.DatasetDict` or a dictionary of :class:`datasets.Dataset`.
batch_sampler (Union[:class:`~sentence_transformers.training_args.BatchSamplers`, `str`], *optional*):
The batch sampler to use. See :class:`~sentence_transformers.training_args.BatchSamplers` for valid options.
Defaults to ``BatchSamplers.BATCH_SAMPLER``.
multi_dataset_batch_sampler (Union[:class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`, `str`], *optional*):
The multi-dataset batch sampler to use. See :class:`~sentence_transformers.training_args.MultiDatasetBatchSamplers`
for valid options. Defaults to ``MultiDatasetBatchSamplers.PROPORTIONAL``.
"""
|
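A minimal sketch of the `prompts` formats listed in the docstring above; the import path, the output directory, and the column/dataset names ("query", "answer", "nq") are assumptions for illustration, not taken from the listing.
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments  # assumed module path

# 1. One prompt applied to every column of every dataset
args = CrossEncoderTrainingArguments(output_dir="out", prompts="query: ")
# 2./3. A flat mapping, keyed either by column name or by dataset name
args = CrossEncoderTrainingArguments(
    output_dir="out",
    prompts={"query": "query: ", "answer": "document: "},
)
# 4. Per-dataset, per-column prompts for a DatasetDict
args = CrossEncoderTrainingArguments(
    output_dir="out",
    prompts={"nq": {"query": "query: ", "answer": "document: "}},
)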
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
run_training_check = False if backend.backend() == "numpy" else True
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
run_training_check=run_training_check,
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not working in numpy"
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
@pytest.mark.skipif(
backend.backend() == "torch",
reason="Fails on CI, passes locally. TODO: debug",
)
def test_from_config(self):
pipeline = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
x = np.ones((2, 10, 12, 3))
output = pipeline(x)
restored = layers.Pipeline.from_config(pipeline.get_config())
restored_output = restored(x)
self.assertEqual(tuple(output.shape), (2, 8, 9, 3))
self.assertAllClose(output, restored_output)
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
run_training_check = False if backend.backend() == "numpy" else True
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
run_training_check=run_training_check,
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not working in numpy"
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
def test_from_config(self):
pipeline = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
x = np.ones((2, 10, 12, 3))
output = pipeline(x)
restored = layers.Pipeline.from_config(pipeline.get_config())
restored_output = restored(x)
self.assertEqual(tuple(output.shape), (2, 8, 9, 3))
self.assertAllClose(output, restored_output)
|
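A hedged usage sketch of `layers.Pipeline` mirroring the tests above (same layers, same shapes); it assumes the default channels_last image data format.
import numpy as np
from keras.src import layers  # same import style as the tests above

# Chain the preprocessing layers used in the tests and apply them to a batch.
pipeline = layers.Pipeline([layers.AutoContrast(), layers.CenterCrop(8, 9)])
images = np.random.random((2, 10, 12, 3)).astype("float32")
print(pipeline(images).shape)  # (2, 8, 9, 3), as asserted in test_from_config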
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
] = 'random in [49152, 65535]' # avoid random numbers cause devbot forever committing
type = None if v['type'] == 'null' else v['type']
table.append(f'| `{k}` | {desc} | `{type}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
v[
'default'
] = 'random in [49152, 65535]' # avoid random numbers cause devbot forever committing
table.append(f'| `{k}` | {desc} | `{v["type"]}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
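A standalone sketch of the row-building loop in both scripts above, with a made-up property dict standing in for what `api_to_dict()` provides.
props = {
    "timeout_ctrl": {
        "description": "Timeout in milliseconds of the control operations.",
        "type": "number",
        "default": 60,
    }
}
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in props.items():
    desc = v["description"].replace("\n", "<br>")
    table.append(f'| `{k}` | {desc} | `{v["type"]}` | `{v["default"]}` |')
print('\n'.join(table))
# | Name | Description | Type | Default |
# |----|----|----|----|
# | `timeout_ctrl` | Timeout in milliseconds of the control operations. | `number` | `60` |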
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.1.5", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.6", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.1.5", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.16.0", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.6", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
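A small self-contained illustration of how the inverse `tag_to_packages` mapping above comes out, using a two-package subset of `dependent_packages`.
from collections import defaultdict

dependent_packages = {
    "numpy": ("1.19.5", "build, install"),
    "pytest": ("7.1.2", "tests"),
}
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
    for extra in extras.split(", "):
        tag_to_packages[extra].append("{}>={}".format(package, min_version))
print(dict(tag_to_packages))
# {'build': ['numpy>=1.19.5'], 'install': ['numpy>=1.19.5'], 'tests': ['pytest>=7.1.2']}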
import os.path
from pathlib import Path
from typing import Any, Callable, List, Optional, Tuple, Union
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (str or ``pathlib.Path``): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: Union[str, Path],
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
if not isinstance(index, int):
raise ValueError(f"Index must be of type integer, got {type(index)} instead.")
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (str or ``pathlib.Path``): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
import os.path
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .vision import VisionDataset
class CocoDetection(VisionDataset):
"""`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
"""
def __init__(
self,
root: str,
annFile: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
transforms: Optional[Callable] = None,
) -> None:
super().__init__(root, transforms, transform, target_transform)
from pycocotools.coco import COCO
self.coco = COCO(annFile)
self.ids = list(sorted(self.coco.imgs.keys()))
def _load_image(self, id: int) -> Image.Image:
path = self.coco.loadImgs(id)[0]["file_name"]
return Image.open(os.path.join(self.root, path)).convert("RGB")
def _load_target(self, id: int) -> List[Any]:
return self.coco.loadAnns(self.coco.getAnnIds(id))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
if not isinstance(index, int):
raise ValueError(f"Index must be of type integer, got {type(index)} instead.")
id = self.ids[index]
image = self._load_image(id)
target = self._load_target(id)
if self.transforms is not None:
image, target = self.transforms(image, target)
return image, target
def __len__(self) -> int:
return len(self.ids)
class CocoCaptions(CocoDetection):
"""`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset.
It requires the `COCO API to be installed <https://github.com/pdollar/coco/tree/master/PythonAPI>`_.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.PILToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.PILToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def _load_target(self, id: int) -> List[str]:
return [ann["caption"] for ann in super()._load_target(id)]
|
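A hedged sketch of using `CocoDetection` analogously to the `CocoCaptions` example in the docstring above; the root and annotation-file paths are placeholders.
import torchvision.datasets as dset
import torchvision.transforms as transforms

det = dset.CocoDetection(root='dir where images are',
                         annFile='json annotation file',
                         transform=transforms.PILToTensor())
print('Number of samples: ', len(det))
img, target = det[3]  # load 4th sample; `target` is a list of annotation dicts
print('Image Size: ', img.size())
print('Number of annotations: ', len(target))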
# mypy: allow-untyped-defs
import logging
from collections.abc import Sequence
from typing import cast
from ... import config
from ...codecache import code_hash, get_path
from ...scheduler import BaseSchedulerNode, BaseScheduling, SchedulerNode
from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product
from ...virtualized import V
from ..common import IndentedBuffer
from .rocm_template_buffer import ROCmTemplateBuffer
log = logging.getLogger(__name__)
class ROCmCPPScheduling(BaseScheduling):
"""
Partial Scheduling implementation for ROCm C++ Kernels.
This class is intended to be used in combination with TritonScheduling,
and delegated to by CUDACombinedScheduling.
It handles fusion decisions and ROCm C++ specific template code generation.
"""
def group_fn(self, sizes):
return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)
@staticmethod
def is_rocm_cpp_template(node: BaseSchedulerNode) -> bool:
return isinstance(node, SchedulerNode) and isinstance(
node.node, ROCmTemplateBuffer
)
def can_fuse_vertical(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
return False
def define_kernel(self, src_code: str, node_schedule) -> str:
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
fused_name = (
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
if config.triton.descriptive_names
else ""
)
kernel_name = "_".join(["rocm", fused_name, wrapper.next_kernel_suffix()])
# use the original src_code as the key
wrapper.src_to_kernel[src_code] = kernel_name
src_code = src_code.replace("KERNEL_NAME", kernel_name)
_, _, kernel_path = get_path(code_hash(src_code), "py")
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline("async_compile.rocm(r'''")
compile_wrapper.splice(src_code, strip=True)
compile_wrapper.writeline(
f"''', 'so', aot_compile={str(V.graph.aot_mode)})"
)
metadata_comment = f"# kernel path: {kernel_path}"
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment += "\n" + origins + "\n" + detailed_origins
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
return kernel_name
def codegen_template(
self,
template_node: BaseSchedulerNode,
epilogue_nodes: Sequence[BaseSchedulerNode],
prologue_nodes: Sequence[BaseSchedulerNode],
):
"""
Codegen a ROCm template, possibly with fused epilogues
"""
assert self.is_rocm_cpp_template(template_node), (
"Template node passed to ROCmScheduler.codegen_template must be a SchedulerNode that wraps a ROCmTemplateBuffer"
)
template_node = cast(SchedulerNode, template_node)
_, (_numel, rnumel) = template_node.group
assert rnumel == 1
ctb: ROCmTemplateBuffer = cast(ROCmTemplateBuffer, template_node.node)
kernel, render = ctb.make_kernel_render(ctb) # type: ignore[misc]
with kernel:
template_node.mark_run()
src_code = render()
with V.set_kernel_handler(kernel):
node_schedule = [template_node]
kernel_name = self.define_kernel(src_code, node_schedule)
kernel.call_kernel(kernel_name, ctb)
V.graph.removed_buffers |= kernel.removed_buffers
self.free_buffers_in_scheduler()
|
# mypy: allow-untyped-defs
import logging
from collections.abc import Sequence
from typing import cast
from ... import config
from ...codecache import code_hash, get_path
from ...scheduler import BaseSchedulerNode, BaseScheduling, SchedulerNode
from ...utils import get_fused_kernel_name, get_kernel_metadata, sympy_product
from ...virtualized import V
from ..common import IndentedBuffer
from .rocm_template_buffer import ROCmTemplateBuffer
log = logging.getLogger(__name__)
class ROCmCPPScheduling(BaseScheduling):
"""
Partial Scheduling implementation for ROCm C++ Kernels.
This class is intended to be used in combination with TritonScheduling,
and delegated to by CUDACombinedScheduling.
It handles fusion decisions and ROCm C++ specific template code generation.
"""
def group_fn(self, sizes):
return tuple(V.graph.sizevars.simplify(sympy_product(s)) for s in sizes)
@staticmethod
def is_rocm_cpp_template(node: BaseSchedulerNode) -> bool:
return isinstance(node, SchedulerNode) and isinstance(
node.node, ROCmTemplateBuffer
)
def can_fuse_vertical(
self, node1: BaseSchedulerNode, node2: BaseSchedulerNode
) -> bool:
return False
def define_kernel(self, src_code: str, node_schedule) -> str:
wrapper = V.graph.wrapper_code
if src_code in wrapper.src_to_kernel:
kernel_name = wrapper.src_to_kernel[src_code]
else:
fused_name = (
get_fused_kernel_name(node_schedule, config.triton.descriptive_names)
if config.triton.descriptive_names
else ""
)
kernel_name = "_".join(["rocm", fused_name, wrapper.next_kernel_suffix()])
# use the original src_code as the key
wrapper.src_to_kernel[src_code] = kernel_name
src_code = src_code.replace("KERNEL_NAME", kernel_name)
_, _, kernel_path = get_path(code_hash(src_code), "py")
compile_wrapper = IndentedBuffer()
compile_wrapper.writeline("async_compile.rocm(r'''")
compile_wrapper.splice(src_code, strip=True)
compile_wrapper.writeline(
f"''', 'so', aot_compile={str(V.graph.aot_mode)})"
)
metadata_comment = f"# kernel path: {kernel_path}"
origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper)
metadata_comment += "\n" + origins + "\n" + detailed_origins
wrapper.define_kernel(
kernel_name, compile_wrapper.getvalue(), metadata_comment
)
return kernel_name
def codegen_template(
self,
template_node: BaseSchedulerNode,
epilogue_nodes: Sequence[BaseSchedulerNode],
prologue_nodes: Sequence[BaseSchedulerNode],
):
"""
Codegen a ROCm template, possibly with fused epilogues
"""
assert self.is_rocm_cpp_template(template_node), (
"Template node passed to ROCmScheduler.codegen_template must be a SchedulerNode that wraps a ROCmTemplateBuffer"
)
template_node = cast(SchedulerNode, template_node)
_, (_numel, rnumel) = template_node.group
assert rnumel == 1
ctb: ROCmTemplateBuffer = cast(ROCmTemplateBuffer, template_node.node)
kernel, render = ctb.make_kernel_render(ctb)
with kernel:
template_node.mark_run()
src_code = render()
with V.set_kernel_handler(kernel):
node_schedule = [template_node]
kernel_name = self.define_kernel(src_code, node_schedule)
kernel.call_kernel(kernel_name, ctb)
V.graph.removed_buffers |= kernel.removed_buffers
self.free_buffers_in_scheduler()
|
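A toy illustration of the kernel-name scheme in `define_kernel` above: "rocm", an optional descriptive fused name, and a wrapper-provided suffix joined with underscores. The concrete values here are hypothetical.
fused_name = "addmm"  # hypothetical result of get_fused_kernel_name(...)
suffix = "0"          # hypothetical result of wrapper.next_kernel_suffix()
kernel_name = "_".join(["rocm", fused_name, suffix])
print(kernel_name)  # rocm_addmm_0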
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
if 'gt_semantic_seg' in cfg.train_pipeline[-1]['keys']:
cfg.data.train.pipeline = [
p for p in cfg.data.train.pipeline if p['type'] != 'SegRescale'
]
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
gt_seg = item.get('gt_semantic_seg', None)
if gt_seg is not None:
pad_value = 255 # the padding value of gt_seg
sem_labels = np.unique(gt_seg)
all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
all_labels, counts = np.unique(all_labels, return_counts=True)
stuff_labels = all_labels[np.logical_and(counts < 2,
all_labels != pad_value)]
stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
axis=0)
# If you need to show the bounding boxes,
# please comment the following line
gt_bboxes = None
imshow_det_bboxes(
item['img'],
gt_bboxes,
gt_labels,
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=dataset.PALETTE,
text_color=(200, 200, 200),
mask_color=dataset.PALETTE)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from collections import Sequence
from pathlib import Path
import mmcv
from mmcv import Config, DictAction
from mmdet.core.utils import mask2ndarray
from mmdet.core.visualization import imshow_det_bboxes
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type, cfg_options):
def skip_pipeline_steps(config):
config['pipeline'] = [
x for x in config.pipeline if x['type'] not in skip_type
]
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
if isinstance(train_data_cfg, Sequence):
[skip_pipeline_steps(c) for c in train_data_cfg]
else:
skip_pipeline_steps(train_data_cfg)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
gt_masks = item.get('gt_masks', None)
if gt_masks is not None:
gt_masks = mask2ndarray(gt_masks)
imshow_det_bboxes(
item['img'],
item['gt_bboxes'],
item['gt_labels'],
gt_masks,
class_names=dataset.CLASSES,
show=not args.not_show,
wait_time=args.show_interval,
out_file=filename,
bbox_color=(255, 102, 61),
text_color=(255, 102, 61))
progress_bar.update()
if __name__ == '__main__':
main()
|
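A standalone illustration of the `skip_pipeline_steps` filtering rule used by both scripts above; the pipeline entries are made up.
skip_type = ['DefaultFormatBundle', 'Normalize', 'Collect']
pipeline = [
    {'type': 'LoadImageFromFile'},
    {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53]},
    {'type': 'Collect', 'keys': ['img', 'gt_bboxes', 'gt_labels']},
]
# Keep only the steps whose 'type' is not in skip_type.
pipeline = [x for x in pipeline if x['type'] not in skip_type]
print(pipeline)  # [{'type': 'LoadImageFromFile'}]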
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBoxes
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_boxes=BoundingBoxes(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
|
import pathlib
from typing import Any, BinaryIO, Dict, Iterator, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper, Zipper
from torchvision.datapoints import BoundingBox
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
read_mat,
)
from .._api import register_dataset, register_info
class StanfordCarsLabelReader(IterDataPipe[Tuple[int, int, int, int, int, str]]):
def __init__(self, datapipe: IterDataPipe[Dict[str, Any]]) -> None:
self.datapipe = datapipe
def __iter__(self) -> Iterator[Tuple[int, int, int, int, int, str]]:
for _, file in self.datapipe:
data = read_mat(file, squeeze_me=True)
for ann in data["annotations"]:
yield tuple(ann) # type: ignore[misc]
NAME = "stanford-cars"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class StanfordCars(Dataset):
"""Stanford Cars dataset.
homepage="https://ai.stanford.edu/~jkrause/cars/car_dataset.html",
dependencies=scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_URL_ROOT = "https://ai.stanford.edu/~jkrause/"
_URLS = {
"train": f"{_URL_ROOT}car196/cars_train.tgz",
"test": f"{_URL_ROOT}car196/cars_test.tgz",
"cars_test_annos_withlabels": f"{_URL_ROOT}car196/cars_test_annos_withlabels.mat",
"car_devkit": f"{_URL_ROOT}cars/car_devkit.tgz",
}
_CHECKSUM = {
"train": "b97deb463af7d58b6bfaa18b2a4de9829f0f79e8ce663dfa9261bf7810e9accd",
"test": "bffea656d6f425cba3c91c6d83336e4c5f86c6cffd8975b0f375d3a10da8e243",
"cars_test_annos_withlabels": "790f75be8ea34eeded134cc559332baf23e30e91367e9ddca97d26ed9b895f05",
"car_devkit": "512b227b30e2f0a8aab9e09485786ab4479582073a144998da74d64b801fd288",
}
def _resources(self) -> List[OnlineResource]:
resources: List[OnlineResource] = [HttpResource(self._URLS[self._split], sha256=self._CHECKSUM[self._split])]
if self._split == "train":
resources.append(HttpResource(url=self._URLS["car_devkit"], sha256=self._CHECKSUM["car_devkit"]))
else:
resources.append(
HttpResource(
self._URLS["cars_test_annos_withlabels"], sha256=self._CHECKSUM["cars_test_annos_withlabels"]
)
)
return resources
def _prepare_sample(self, data: Tuple[Tuple[str, BinaryIO], Tuple[int, int, int, int, int, str]]) -> Dict[str, Any]:
image, target = data
path, buffer = image
image = EncodedImage.from_file(buffer)
return dict(
path=path,
image=image,
label=Label(target[4] - 1, categories=self._categories),
bounding_box=BoundingBox(target[:4], format="xyxy", spatial_size=image.spatial_size),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
if self._split == "train":
targets_dp = Filter(targets_dp, path_comparator("name", "cars_train_annos.mat"))
targets_dp = StanfordCarsLabelReader(targets_dp)
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self) -> List[str]:
resources = self._resources()
devkit_dp = resources[1].load(self._root)
meta_dp = Filter(devkit_dp, path_comparator("name", "cars_meta.mat"))
_, meta_file = next(iter(meta_dp))
return list(read_mat(meta_file, squeeze_me=True)["class_names"])
def __len__(self) -> int:
return 8_144 if self._split == "train" else 8_041
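
# Hedged usage sketch (not part of the dataset definition above): assuming the
# prototype API's `datasets.load()` entry point for registered datasets and that
# the archives can be resolved for the configured root, iterating the datapipe
# yields the dicts built by `_prepare_sample` (path, image, label, bounding_box).
if __name__ == "__main__":
    from torchvision.prototype import datasets as prototype_datasets

    dp = prototype_datasets.load("stanford-cars", split="train")
    sample = next(iter(dp))
    print(sample["path"], sample["label"], sample["bounding_box"])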
|
from workflows.events import (
Event, # noqa
EventType, # noqa
HumanResponseEvent, # noqa
InputRequiredEvent, # noqa
StartEvent, # noqa
StopEvent, # noqa
)
|
from _collections_abc import dict_items, dict_keys, dict_values
from typing import Any, Dict, Type
from llama_index.core.bridge.pydantic import (
BaseModel,
ConfigDict,
PrivateAttr,
model_serializer,
)
class Event(BaseModel):
"""
Base class for event types that mimics dict interface.
PrivateAttr:
_data (Dict[str, Any]): Underlying Python dict.
Examples:
Basic example usage
```python
from llama_index.core.workflows.events import Event
evt = Event(a=1, b=2)
# can use dot access to get values of `a` and `b`
print((evt.a, evt.b))
# can also set the attrs
evt.a = 2
```
Custom event with additional Fields/PrivateAttr
```python
from llama_index.core.workflows.events import Event
from llama_index.core.bridge.pydantic import Field, PrivateAttr
class CustomEvent(Event):
field_1: int = Field(description="my custom field")
_private_attr_1: int = PrivateAttr()
evt = CustomEvent(a=1, b=2, field_1=3, _private_attr_1=4)
# `field_1` and `_private_attr_1` get set as they do with Pydantic BaseModel
print(evt.field_1)
print(evt._private_attr_1)
# `a` and `b` get set in the underlying dict, namely `evt._data`
print((evt.a, evt.b))
```
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
_data: Dict[str, Any] = PrivateAttr(default_factory=dict)
def __init__(self, **params: Any):
"""
__init__.
NOTE: fields and private_attrs are pulled from params by name.
"""
# extract and set fields, private attrs and remaining shove in _data
fields = {}
private_attrs = {}
data = {}
for k, v in params.items():
if k in self.model_fields:
fields[k] = v
elif k in self.__private_attributes__:
private_attrs[k] = v
else:
data[k] = v
super().__init__(**fields)
for private_attr, value in private_attrs.items():
super().__setattr__(private_attr, value)
if data:
self._data.update(data)
def __getattr__(self, __name: str) -> Any:
if __name in self.__private_attributes__ or __name in self.model_fields:
return super().__getattr__(__name) # type: ignore
else:
try:
return self._data[__name]
except KeyError:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{__name}'"
)
def __setattr__(self, name: str, value: Any) -> None:
if name in self.__private_attributes__ or name in self.model_fields:
super().__setattr__(name, value)
else:
self._data.__setitem__(name, value)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def __setitem__(self, key: str, value: Any) -> None:
self._data[key] = value
def get(self, key: str, default: Any = None) -> Any:
return self._data.get(key, default)
def __contains__(self, key: str) -> bool:
return key in self._data
def keys(self) -> "dict_keys[str, Any]":
return self._data.keys()
def values(self) -> "dict_values[str, Any]":
return self._data.values()
def items(self) -> "dict_items[str, Any]":
return self._data.items()
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Any:
return iter(self._data)
def dict(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
return self._data
def __bool__(self) -> bool:
"""Make test `if event:` pass on Event instances."""
return True
@model_serializer(mode="wrap")
def custom_model_dump(self, handler: Any) -> Dict[str, Any]:
data = handler(self)
# include _data in serialization
if self._data:
data["_data"] = self._data
return data
class StartEvent(Event):
"""StartEvent is implicitly sent when a workflow runs."""
class StopEvent(Event):
"""EndEvent signals the workflow to stop."""
_result: Any = PrivateAttr(default=None)
def __init__(self, result: Any = None, **kwargs: Any) -> None:
# forces the user to provide a result
super().__init__(_result=result, **kwargs)
def _get_result(self) -> Any:
"""This can be overridden by subclasses to return the desired result."""
return self._result
@property
def result(self) -> Any:
return self._get_result()
class InputRequiredEvent(Event):
"""InputRequiredEvent is sent when an input is required for a step."""
class HumanResponseEvent(Event):
"""HumanResponseEvent is sent when a human response is required for a step."""
EventType = Type[Event]
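
# Hedged usage sketch, grounded only in the classes above: StopEvent stores its
# payload in the private `_result` attr (exposed via the `result` property), while
# arbitrary kwargs on other events land in the dict-like `_data` store.
if __name__ == "__main__":
    stop = StopEvent(result={"answer": 42})
    assert stop.result == {"answer": 42}
    assert len(stop) == 0  # nothing was added to the underlying `_data` dict

    start = StartEvent(query="hello")
    assert start.query == "hello" and start["query"] == "hello"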
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocVec
from docarray.array.any_array import AnyDocArray
class DocArraySummary:
def __init__(self, docs: 'AnyDocArray'):
self.docs = docs
def summary(self) -> None:
"""
Print a summary of this DocList object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocVec
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.docs.__class__.__name__)
table.add_row('Length', str(len(self.docs)), end_section=True)
if isinstance(self.docs, DocVec):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(docs=self.docs)
for field_name in stacked_fields:
val = self.docs
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocList Summary', expand=False))
self.docs.doc_type.schema_summary()
@staticmethod
    def _get_stacked_fields(docs: 'DocVec') -> List[str]:  # TODO: this might be broken
        """
        Return a list of the field names of a DocVec instance that are
        stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in docs._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in docs._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocArraySummary._get_stacked_fields(docs=value_doc)
]
)
return fields
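
# Hedged usage sketch (assumes docarray's BaseDoc / NdArray / DocVec public API):
# a DocVec stacks tensor fields into columns, which is what the "Stacked columns"
# section of the summary reports on.
if __name__ == "__main__":
    import numpy as np

    from docarray import BaseDoc
    from docarray.array import DocVec
    from docarray.typing import NdArray

    class MyDoc(BaseDoc):
        embedding: NdArray[128]

    docs = DocVec[MyDoc]([MyDoc(embedding=np.random.rand(128)) for _ in range(3)])
    DocArraySummary(docs).summary()  # prints the table plus the MyDoc schema summary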
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocArrayStacked
from docarray.array.abstract_array import AnyDocArray
class DocArraySummary:
def __init__(self, da: 'AnyDocArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
    def _get_stacked_fields(da: 'DocArrayStacked') -> List[str]:  # TODO: this might be broken
"""
Return a list of the field names of a DocArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.asarray(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct):
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(data_struct, torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
data_struct = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
if data_struct.dtype == object:
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
if isinstance(data_struct, (list, tuple)):
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
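
# Hedged usage sketch (assumes `NumpyFormatter()` can be constructed without
# features, as in the library's own tests): integer columns default to int64 and
# floating columns to float32 per `_tensorize`, and equally shaped rows are
# stacked by `_consolidate`.
if __name__ == "__main__":
    table = pa.table({"a": [[1, 2], [3, 4]], "b": [0.5, 1.5]})
    formatter = NumpyFormatter()
    batch = formatter.format_batch(table)
    print(batch["a"].shape, batch["a"].dtype)  # expected: (2, 2) int64
    print(batch["b"].dtype)                    # expected: float32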
|
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from collections.abc import Mapping
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import Formatter
class NumpyFormatter(Formatter[Mapping, np.ndarray, Mapping]):
def __init__(self, features=None, **np_array_kwargs):
super().__init__(features=features)
self.np_array_kwargs = np_array_kwargs
def _consolidate(self, column):
if isinstance(column, list):
if column and all(
isinstance(x, np.ndarray) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
):
return np.stack(column)
else:
# don't use np.array(column, dtype=object)
# since it fails in certain cases
# see https://stackoverflow.com/q/51005699
out = np.empty(len(column), dtype=object)
out[:] = column
return out
return column
def _tensorize(self, value):
if isinstance(value, (str, bytes, type(None))):
return value
elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
return value
elif isinstance(value, np.number):
return value
default_dtype = {}
if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
default_dtype = {"dtype": np.int64}
elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
default_dtype = {"dtype": np.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(value, PIL.Image.Image):
return np.asarray(value, **self.np_array_kwargs)
return np.array(value, **{**default_dtype, **self.np_array_kwargs})
def _recursive_tensorize(self, data_struct: dict):
# support for nested types like struct of list of struct
if isinstance(data_struct, np.ndarray):
        if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
return map_nested(self._recursive_tensorize, data_struct)
def format_row(self, pa_table: pa.Table) -> Mapping:
row = self.numpy_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> np.ndarray:
column = self.numpy_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
column = self.recursive_tensorize(column)
column = self._consolidate(column)
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
batch = self.numpy_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
batch = self.recursive_tensorize(batch)
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name])
return batch
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import rms_normalization
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
"""Input for ClickTool."""
selector: str = Field(..., description="CSS selector for the element to click")
class ClickTool(BaseBrowserTool):
"""Tool for clicking on an element with the given CSS selector."""
name: str = "click_element"
description: str = "Click on an element with the given CSS selector"
args_schema: Type[BaseModel] = ClickToolInput
visible_only: bool = True
"""Whether to consider only visible elements."""
playwright_strict: bool = False
"""Whether to employ Playwright's strict mode when clicking on elements."""
playwright_timeout: float = 1_000
"""Timeout (in ms) for Playwright to wait for element to be ready."""
def _selector_effective(self, selector: str) -> str:
if not self.visible_only:
return selector
return f"{selector} >> visible=1"
def _run(
self,
selector: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
try:
page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
async def _arun(
self,
selector: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
try:
await page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
|
from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
"""Input for ClickTool."""
selector: str = Field(..., description="CSS selector for the element to click")
class ClickTool(BaseBrowserTool): # type: ignore[override, override, override]
"""Tool for clicking on an element with the given CSS selector."""
name: str = "click_element"
description: str = "Click on an element with the given CSS selector"
args_schema: Type[BaseModel] = ClickToolInput
visible_only: bool = True
"""Whether to consider only visible elements."""
playwright_strict: bool = False
"""Whether to employ Playwright's strict mode when clicking on elements."""
playwright_timeout: float = 1_000
"""Timeout (in ms) for Playwright to wait for element to be ready."""
def _selector_effective(self, selector: str) -> str:
if not self.visible_only:
return selector
return f"{selector} >> visible=1"
def _run(
self,
selector: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.sync_api import TimeoutError as PlaywrightTimeoutError
try:
page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
async def _arun(
self,
selector: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
page = await aget_current_page(self.async_browser)
# Navigate to the desired webpage before using this tool
selector_effective = self._selector_effective(selector=selector)
from playwright.async_api import TimeoutError as PlaywrightTimeoutError
try:
await page.click(
selector_effective,
strict=self.playwright_strict,
timeout=self.playwright_timeout,
)
except PlaywrightTimeoutError:
return f"Unable to click on element '{selector}'"
return f"Clicked element '{selector}'"
|
from __future__ import annotations
import collections
import json
import logging
import os
import string
from collections.abc import Iterable
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info(f"PhraseTokenizer - Phrase ngram lengths: {self.ngram_lengths}")
logger.info(f"PhraseTokenizer - Num phrases: {len(self.ngram_lookup)}")
def tokenize(self, text: str, **kwargs) -> list[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json")) as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
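
# Hedged usage sketch (requires nltk and its 'punkt' tokenizer data): with
# "new_york" in the vocab, the tokens "New" and "York" are merged back into the
# single phrase token before being mapped to vocab indices.
if __name__ == "__main__":
    vocab = ["new_york", "is", "the", "big", "apple"]
    tokenizer = PhraseTokenizer(vocab=vocab, stop_words=[])
    ids = tokenizer.tokenize("New York is the Big Apple")
    print([vocab[i] for i in ids])  # expected: ['new_york', 'is', 'the', 'big', 'apple']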
|
from __future__ import annotations
import collections
import json
import logging
import os
import string
from typing import Iterable
from transformers.utils.import_utils import NLTK_IMPORT_ERROR, is_nltk_available
from .WordTokenizer import ENGLISH_STOP_WORDS, WordTokenizer
logger = logging.getLogger(__name__)
class PhraseTokenizer(WordTokenizer):
"""Tokenizes the text with respect to existent phrases in the vocab.
This tokenizers respects phrases that are in the vocab. Phrases are separated with 'ngram_separator', for example,
in Google News word2vec file, ngrams are separated with a _ like New_York. These phrases are detected in text and merged as one special token. (New York is the ... => [New_York, is, the])
"""
def __init__(
self,
vocab: Iterable[str] = [],
stop_words: Iterable[str] = ENGLISH_STOP_WORDS,
do_lower_case: bool = False,
ngram_separator: str = "_",
max_ngram_length: int = 5,
):
if not is_nltk_available():
raise ImportError(NLTK_IMPORT_ERROR.format(self.__class__.__name__))
self.stop_words = set(stop_words)
self.do_lower_case = do_lower_case
self.ngram_separator = ngram_separator
self.max_ngram_length = max_ngram_length
self.set_vocab(vocab)
def get_vocab(self):
return self.vocab
def set_vocab(self, vocab: Iterable[str]):
self.vocab = vocab
self.word2idx = collections.OrderedDict([(word, idx) for idx, word in enumerate(vocab)])
# Check for ngram in vocab
self.ngram_lookup = set()
self.ngram_lengths = set()
for word in vocab:
if self.ngram_separator is not None and self.ngram_separator in word:
                # Some words might be malformed in e.g. the Google News word2vec, containing two or more _ after each other
ngram_count = word.count(self.ngram_separator) + 1
if self.ngram_separator + self.ngram_separator not in word and ngram_count <= self.max_ngram_length:
self.ngram_lookup.add(word)
self.ngram_lengths.add(ngram_count)
if len(vocab) > 0:
logger.info(f"PhraseTokenizer - Phrase ngram lengths: {self.ngram_lengths}")
logger.info(f"PhraseTokenizer - Num phrases: {len(self.ngram_lookup)}")
def tokenize(self, text: str, **kwargs) -> list[int]:
from nltk import word_tokenize
tokens = word_tokenize(text, preserve_line=True)
# phrase detection
for ngram_len in sorted(self.ngram_lengths, reverse=True):
idx = 0
while idx <= len(tokens) - ngram_len:
ngram = self.ngram_separator.join(tokens[idx : idx + ngram_len])
if ngram in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram]
elif ngram.lower() in self.ngram_lookup:
tokens[idx : idx + ngram_len] = [ngram.lower()]
idx += 1
# Map tokens to idx, filter stop words
tokens_filtered = []
for token in tokens:
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.lower()
if token in self.stop_words:
continue
elif token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
token = token.strip(string.punctuation)
if token in self.stop_words:
continue
elif len(token) > 0 and token in self.word2idx:
tokens_filtered.append(self.word2idx[token])
continue
return tokens_filtered
def save(self, output_path: str):
with open(os.path.join(output_path, "phrasetokenizer_config.json"), "w") as fOut:
json.dump(
{
"vocab": list(self.word2idx.keys()),
"stop_words": list(self.stop_words),
"do_lower_case": self.do_lower_case,
"ngram_separator": self.ngram_separator,
"max_ngram_length": self.max_ngram_length,
},
fOut,
)
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "phrasetokenizer_config.json")) as fIn:
config = json.load(fIn)
return PhraseTokenizer(**config)
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
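
# Hedged usage sketch: Compose also accepts plain callables, so a registry-free
# pipeline can be exercised without any entries in PIPELINES.
if __name__ == "__main__":
    def add_flag(results):
        results['flag'] = True
        return results

    def drop_empty(results):
        # Returning None makes Compose.__call__ short-circuit.
        return results if results.get('img_info') else None

    pipeline = Compose([add_flag, drop_empty])
    print(pipeline({'img_info': {'filename': 'demo.jpg'}}))  # transformed dict
    print(pipeline({}))                                      # None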
|
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += f' {t}'
format_string += '\n)'
return format_string
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_inpaint_pipeline(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
model_id = "xvjiarui/stable-diffusion-2-inpainting"
pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
init_image = num_samples * [init_image]
mask_image = num_samples * [mask_image]
prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)
processed_masked_images = shard(processed_masked_images)
processed_masks = shard(processed_masks)
output = pipeline(
prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
)
images = output.images.reshape(num_samples, 512, 512, 3)
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def test_stable_diffusion_inpaint_pipeline(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png"
)
mask_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)
model_id = "xvjiarui/stable-diffusion-2-inpainting"
pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50
num_samples = jax.device_count()
prompt = num_samples * [prompt]
init_image = num_samples * [init_image]
mask_image = num_samples * [mask_image]
prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)
# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)
processed_masked_images = shard(processed_masked_images)
processed_masks = shard(processed_masks)
output = pipeline(
prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
)
images = output.images.reshape(num_samples, 512, 512, 3)
image_slice = images[0, 253:256, 253:256, -1]
output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
expected_slice = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
)
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i, and likewise the accuracy for the opposite direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool of sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
    if list(dataset.keys()) == ["train"]:
        num_samples = min(5000, len(dataset["train"]))
        datasets[f"train[:{num_samples}]"] = dataset["train"].select(range(num_samples))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.nasnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.nasnet import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.nasnet import NASNetLarge
from keras.src.applications.nasnet import NASNetMobile
from keras.src.applications.nasnet import decode_predictions
from keras.src.applications.nasnet import preprocess_input
|
_base_ = './ms-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
from typing import Any, Optional
def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"# {header}\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
# nl var to enable the usage of this symbol inside f-string expressions
nl = "\n"
markdown_parts.append(
f"{indent}> {data.replace(nl, nl + indent + '> ')}\n"
)
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
|
from typing import Any
def json_to_markdown(data: Any, level: int = 0, header: str | None = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"# {header}\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
# nl var to enable the usage of this symbol inside f-string expressions
nl = "\n"
markdown_parts.append(
f"{indent}> {data.replace(nl, nl + indent + '> ')}\n"
)
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
|
_base_ = 'faster-rcnn_r50_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
client.delete_collection(collection_name='documents')
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
client.delete_collection(collection_name='documents')
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import os
from typing import Callable, List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from ...transform_encoder import TransformerTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_compute_tokens():
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
@pytest.mark.parametrize(
'hidden_seqlen', [4, 8]
)
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello world"])
hidden_states = tuple(torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7))
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
enc = TransformerTorchEncoder(device="cuda")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {"traversal_paths": [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append("Hello, my name is Michael.")
sentences.append("Today we are going to Disney World.")
sentences.append("There are animals on the road")
sentences.append("A dog is running down the road")
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
import os
from typing import Callable, List
import numpy as np
import pytest
import torch
from jina import Document, DocumentArray
from jinahub.encoder.transform_encoder import TransformerTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_compute_tokens():
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello this is a test", "and another test"])
assert tokens["input_ids"].shape == (2, 7)
assert tokens["attention_mask"].shape == (2, 7)
@pytest.mark.parametrize(
'hidden_seqlen', [4, 8]
)
def test_compute_embeddings(hidden_seqlen):
embedding_size = 10
enc = TransformerTorchEncoder()
tokens = enc._generate_input_tokens(["hello world"])
hidden_states = tuple(torch.zeros(1, hidden_seqlen, embedding_size) for _ in range(7))
embeddings = enc._compute_embedding(
hidden_states=hidden_states, input_tokens=tokens
)
assert embeddings.shape == (1, embedding_size)
def test_encoding_cpu():
enc = TransformerTorchEncoder(device="cpu")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="GPU is needed for this test")
def test_encoding_gpu():
enc = TransformerTorchEncoder(device="cuda")
input_data = DocumentArray([Document(text="hello world")])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (768,)
def test_encodes_semantic_meaning():
sentences = dict()
sentences["A"] = "Hello, my name is Michael."
sentences["B"] = "Today we are going to Disney World."
sentences["C"] = "There are animals on the road"
sentences["D"] = "A dog is running down the road"
encoder = TransformerTorchEncoder()
embeddings = {}
for id_, sentence in sentences.items():
docs = DocumentArray([Document(text=sentence)])
encoder.encode(docs, parameters={})
embeddings[id_] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist("C", "D")
assert small_distance < dist("C", "B")
assert small_distance < dist("C", "A")
assert small_distance < dist("B", "A")
@pytest.mark.parametrize(
["docs", "docs_per_path", "traversal_path"],
[
(pytest.lazy_fixture("docs_with_text"), [["r", 10], ["c", 0], ["cc", 0]], "r"),
(
pytest.lazy_fixture("docs_with_chunk_text"),
[["r", 0], ["c", 10], ["cc", 0]],
"c",
),
(
pytest.lazy_fixture("docs_with_chunk_chunk_text"),
[["r", 0], ["c", 0], ["cc", 10]],
"cc",
),
],
)
def test_traversal_path(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_path: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res).traverse_flat([path]).get_attributes("embedding")
)
for emb in embeddings:
if emb is None:
return False
return len(embeddings) == count
return validate
encoder = TransformerTorchEncoder(default_traversal_paths=[traversal_path])
encoder.encode(docs, {"traversal_paths": [traversal_path]})
assert validate_traversal(docs_per_path)(docs)
def test_multiple_traversal_paths():
sentences = list()
sentences.append("Hello, my name is Michael.")
sentences.append("Today we are going to Disney World.")
sentences.append("There are animals on the road")
sentences.append("A dog is running down the road")
docs = DocumentArray([Document(text=sentence) for sentence in sentences])
for index, sent in enumerate(sentences):
docs[index].chunks.append(Document(text=sent))
docs[index].chunks[0].chunks.append(Document(text=sentences[3 - index]))
encoder = TransformerTorchEncoder(default_traversal_paths=["r", "c", "cc"])
encoder.encode(docs, {})
for doc in docs:
assert doc.embedding.shape == (768,)
assert doc.chunks[0].embedding.shape == (768,)
assert doc.chunks[0].chunks[0].embedding.shape == (768,)
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from .workflow import Workflow
class ServiceNotFoundError(Exception):
"""An error raised when the service manager couldn't find a certain service name."""
class ServiceManager:
"""
    A helper class to decouple how services are managed from the Workflow class.
A Service is nothing more than a workflow instance attached to another workflow.
The service is made available to the steps of the main workflow.
"""
def __init__(self) -> None:
self._services: Dict[str, Workflow] = {}
def get(self, name: str, default: Optional["Workflow"] = None) -> "Workflow":
try:
return self._services[name]
except KeyError as e:
if default:
return default
msg = f"Service {name} not found"
            raise ServiceNotFoundError(msg) from e
def add(self, name: str, service: "Workflow") -> None:
self._services[name] = service
|
from typing import Dict, TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from .workflow import Workflow
class ServiceNotFoundError(Exception):
"""An error raised when the service manager couldn't find a certain service name."""
class ServiceManager:
"""An helper class to decouple how services are managed from the Workflow class.
A Service is nothing more than a workflow instance attached to another workflow.
The service is made available to the steps of the main workflow.
"""
def __init__(self) -> None:
self._services: Dict[str, "Workflow"] = {}
def get(self, name: str, default: Optional["Workflow"] = None) -> "Workflow":
try:
return self._services[name]
except KeyError as e:
if default:
return default
msg = f"Service {name} not found"
            raise ServiceNotFoundError(msg) from e
def add(self, name: str, service: "Workflow") -> None:
self._services[name] = service
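# A minimal usage sketch (illustrative only): `object()` stands in for a real
# Workflow instance purely to show how add()/get() and the `default` fallback behave.
if __name__ == "__main__":
    manager = ServiceManager()
    fake_workflow = object()  # stand-in for a Workflow instance
    manager.add("search", fake_workflow)
    assert manager.get("search") is fake_workflow
    assert manager.get("missing", default=fake_workflow) is fake_workflow
    try:
        manager.get("missing")
    except ServiceNotFoundError as err:
        print(err)  # "Service missing not found"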
|
from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
    You do not need to understand this class in order to use a KodaRetriever - it will be instantiated automatically if a dictionary is provided.
    Pydantic class to enforce the required fields for a KodaRetriever.
    It's best to instantiate this from a dictionary; don't bother trying to instantiate it by declaring AlphaCategory objects yourself.
Example:
>>> data = {
"normal query": { # examples is not required if you aren't using few-shot auto-routing
"alpha": .5
, "description": "This is a normal query" # desc is not required if you aren't using few-shot auto-routing
, "examples": ["This is a normal query", "Another normal query"]
}
}
>>> matrix = AlphaMatrix(data=data) # arg must be named matrix for the retriever to use it
"""
class AlphaCategory(BaseModel):
"""
Subclass to enforce the required fields for a category in the AlphaMatrix - necessary for nesting in the AlphaMatrix class
        You should not normally need to touch this, as it is only used for type checking and validation.
"""
alpha: float
description: Optional[
str
        ] = None # optional if providing a custom LLM; it's presumed this was part of your training data for the custom model
examples: Optional[
List[str]
] = None # if not providing a custom model, this is required
data: Dict[str, AlphaCategory]
def get_alpha(self, category: str) -> float:
"""Simple helper function to get the alpha value for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).alpha # type: ignore
def get_examples(self, category: str) -> List[str]:
"""Simple helper function to get the examples for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).examples # type: ignore
def get_description(self, category: str) -> str:
"""Simple helper function to get the description for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).description # type: ignore
def get_categories(self) -> list:
"""Simple helper function to get the categories for a given category."""
return list(self.data.keys())
def format_category(self, category: str) -> str:
"""Simple helper function to format the category information for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
description = self.get_description(category)
examples = self.get_examples(category)
category_info = f"""
- {category}:
description: {description}
""".strip()
if examples:
examples = "; ".join(examples)
example_info = f"""
examples:
{examples}
"""
category_info = f"{category_info}\n{example_info}"
return category_info
def get_all_category_info(self) -> str:
"""Simple helper function to get the category information for all categories."""
categories = []
for category in self.get_categories():
category_info = self.format_category(category)
categories.append(category_info)
return "\n".join(categories)
|
from pydantic import BaseModel
from typing import Optional, Dict, List
class AlphaMatrix(BaseModel):
"""
    You do not need to understand this class in order to use a KodaRetriever - it will be instantiated automatically if a dictionary is provided.
    Pydantic class to enforce the required fields for a KodaRetriever.
    It's best to instantiate this from a dictionary; don't bother trying to instantiate it by declaring AlphaCategory objects yourself.
Example:
>>> data = {
"normal query": { # examples is not required if you aren't using few-shot auto-routing
"alpha": .5
, "description": "This is a normal query" # desc is not required if you aren't using few-shot auto-routing
, "examples": ["This is a normal query", "Another normal query"]
}
}
>>> matrix = AlphaMatrix(data=data) # arg must be named matrix for the retriever to use it
"""
class AlphaCategory(BaseModel):
"""
Subclass to enforce the required fields for a category in the AlphaMatrix - necessary for nesting in the AlphaMatrix class
        You should not normally need to touch this, as it is only used for type checking and validation.
"""
alpha: float
description: Optional[
str
        ] = None # optional if providing a custom LLM; it's presumed this was part of your training data for the custom model
examples: Optional[
List[str]
] = None # if not providing a custom model, this is required
data: Dict[str, AlphaCategory]
def get_alpha(self, category: str) -> float:
"""Simple helper function to get the alpha value for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).alpha # type: ignore
def get_examples(self, category: str) -> List[str]:
"""Simple helper function to get the examples for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).examples # type: ignore
def get_description(self, category: str) -> str:
"""Simple helper function to get the description for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
return self.data.get(category).description # type: ignore
def get_categories(self) -> list:
"""Simple helper function to get the categories for a given category."""
return list(self.data.keys())
def format_category(self, category: str) -> str:
"""Simple helper function to format the category information for a given category."""
if category not in self.data:
err = f"Provided category '{category}' cannot be found"
raise ValueError(err)
description = self.get_description(category)
examples = self.get_examples(category)
category_info = f"""
- {category}:
description: {description}
""".strip()
if examples:
examples = "; ".join(examples)
example_info = f"""
examples:
{examples}
"""
category_info = f"{category_info}\n{example_info}"
return category_info
def get_all_category_info(self) -> str:
"""Simple helper function to get the category information for all categories."""
categories = []
for category in self.get_categories():
category_info = self.format_category(category)
categories.append(category_info)
return "\n".join(categories)
|
from dataclasses import dataclass
from typing import List, Union
import numpy as np
import PIL.Image
import torch
from diffusers.utils import BaseOutput
@dataclass
class HunyuanVideoPipelineOutput(BaseOutput):
r"""
Output class for HunyuanVideo pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - it can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
`(batch_size, num_frames, channels, height, width)`.
"""
frames: torch.Tensor
@dataclass
class HunyuanVideoFramepackPipelineOutput(BaseOutput):
r"""
Output class for HunyuanVideo pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - it can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
`(batch_size, num_frames, channels, height, width)`. Or, a list of torch tensors where each tensor
corresponds to a latent that decodes to multiple frames.
"""
frames: Union[torch.Tensor, np.ndarray, List[List[PIL.Image.Image]], List[torch.Tensor]]
|
from dataclasses import dataclass
import torch
from diffusers.utils import BaseOutput
@dataclass
class HunyuanVideoPipelineOutput(BaseOutput):
r"""
Output class for HunyuanVideo pipelines.
Args:
frames (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]):
            List of video outputs - it can be a nested list of length `batch_size`, with each sub-list containing
            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape
`(batch_size, num_frames, channels, height, width)`.
"""
frames: torch.Tensor
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite>=0.3.12',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov==3.0.0',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client==0.8.0',
],
'annlite': [
'annlite>=0.3.12',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._temporal import UniformTemporalSubsample
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
from torchvision.transforms import AutoAugmentPolicy, InterpolationMode # usort: skip
from . import functional # usort: skip
from ._transform import Transform # usort: skip
from ._presets import StereoMatching # usort: skip
from ._augment import RandomCutmix, RandomErasing, RandomMixup, SimpleCopyPaste
from ._auto_augment import AugMix, AutoAugment, RandAugment, TrivialAugmentWide
from ._color import (
ColorJitter,
RandomAdjustSharpness,
RandomAutocontrast,
RandomEqualize,
RandomInvert,
RandomPhotometricDistort,
RandomPosterize,
RandomSolarize,
)
from ._container import Compose, RandomApply, RandomChoice, RandomOrder
from ._geometry import (
CenterCrop,
ElasticTransform,
FiveCrop,
FixedSizeCrop,
Pad,
RandomAffine,
RandomCrop,
RandomHorizontalFlip,
RandomIoUCrop,
RandomPerspective,
RandomResize,
RandomResizedCrop,
RandomRotation,
RandomShortestSize,
RandomVerticalFlip,
RandomZoomOut,
Resize,
ScaleJitter,
TenCrop,
)
from ._meta import ClampBoundingBoxes, ConvertBoundingBoxFormat, ConvertColorSpace, ConvertDtype, ConvertImageDtype
from ._misc import (
GaussianBlur,
Identity,
Lambda,
LinearTransformation,
Normalize,
PermuteDimensions,
RemoveSmallBoundingBoxes,
ToDtype,
TransposeDimensions,
)
from ._type_conversion import DecodeImage, LabelToOneHot, PILToTensor, ToImagePIL, ToImageTensor, ToPILImage
from ._deprecated import Grayscale, RandomGrayscale, ToTensor # usort: skip
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RetinaNet',
backbone=dict(
_delete_=True,
type='PyramidVisionTransformerV2',
embed_dims=32,
num_layers=[2, 2, 2, 2],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b0.pth')),
neck=dict(in_channels=[32, 64, 160, 256]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='RetinaNet',
backbone=dict(
_delete_=True,
type='PyramidVisionTransformerV2',
embed_dims=32,
num_layers=[2, 2, 2, 2],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b0.pth')),
neck=dict(in_channels=[32, 64, 160, 256]))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Hugging Face/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with (
gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(filepath, encoding="utf8") as fIn
):
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"Train sentences: {len(train_sentences)}")
# A regular torch DataLoader; as the loss we use losses.ContrastiveTensionLossInBatchNegatives
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps to the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Hugging Face/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"Train sentences: {len(train_sentences)}")
# A regular torch DataLoader; as the loss we use losses.ContrastiveTensionLossInBatchNegatives
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
MultiDataAspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .custom_sample_size_sampler import CustomSampleSizeSampler
from .multi_data_sampler import MultiDataSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler', 'MultiDataSampler',
'MultiDataAspectRatioBatchSampler', 'CustomSampleSizeSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .batch_sampler import (AspectRatioBatchSampler,
MultiDataAspectRatioBatchSampler,
TrackAspectRatioBatchSampler)
from .class_aware_sampler import ClassAwareSampler
from .multi_data_sampler import MultiDataSampler
from .multi_source_sampler import GroupMultiSourceSampler, MultiSourceSampler
from .track_img_sampler import TrackImgSampler
__all__ = [
'ClassAwareSampler', 'AspectRatioBatchSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'TrackImgSampler',
'TrackAspectRatioBatchSampler', 'MultiDataSampler',
'MultiDataAspectRatioBatchSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert'
]
|
import warnings
import wave
from abc import ABC
from typing import BinaryIO, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.utils.misc import is_notebook
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save_to_wav_file(
self: 'T',
file_path: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> None:
"""
Save audio tensor to a .wav file. Mono/stereo is preserved.
:param file_path: path to a .wav file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
"""
comp_backend = self.get_comp_backend()
n_channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
with wave.open(file_path, 'w') as f:
f.setnchannels(n_channels)
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(self.to_bytes())
def display(self, rate=44100):
"""
Play audio data from tensor in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
audio_np = self.get_comp_backend().to_numpy(self)
display(Audio(audio_np, rate=rate))
else:
warnings.warn('Display of audio is only possible in a notebook.')
|
import wave
from abc import ABC
from typing import BinaryIO, TypeVar, Union
from docarray.typing.tensor.abstract_tensor import AbstractTensor
T = TypeVar('T', bound='AbstractAudioTensor')
MAX_INT_16 = 2**15
class AbstractAudioTensor(AbstractTensor, ABC):
def to_bytes(self):
"""
Convert audio tensor to bytes.
"""
tensor = self.get_comp_backend().to_numpy(self)
tensor = (tensor * MAX_INT_16).astype('<h')
return tensor.tobytes()
def save_to_wav_file(
self: 'T',
file_path: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
) -> None:
"""
Save audio tensor to a .wav file. Mono/stereo is preserved.
:param file_path: path to a .wav file. If file is a string, open the file by
that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
"""
comp_backend = self.get_comp_backend()
n_channels = 2 if comp_backend.n_dim(array=self) > 1 else 1 # type: ignore
with wave.open(file_path, 'w') as f:
f.setnchannels(n_channels)
f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(self.to_bytes())
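# A standalone sketch (illustrative only) of the float -> 16-bit PCM conversion that
# to_bytes() performs: scale by MAX_INT_16 and cast to little-endian int16 ('<h').
# The tiny sine wave below is made-up demo data.
if __name__ == "__main__":
    import numpy as np

    samples = np.sin(np.linspace(0, 2 * np.pi, 8))  # float audio in [-1, 1]
    pcm = (samples * MAX_INT_16).astype('<h')       # same conversion as to_bytes()
    assert pcm.itemsize == 2                        # 16-bit samples
    assert len(pcm.tobytes()) == 2 * len(samples)   # 2 bytes per sample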
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datapoints import Image, Label
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
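# A standalone sketch (illustrative only) of the libsvm-style line format that
# _prepare_sample() expects: "<label> <index>:<value> ...", with pixel values in
# [-1, 1] rescaled to [0, 1]. Real USPS lines carry 256 values (16x16); the
# 4-value line below is made up for demonstration.
if __name__ == "__main__":
    line = "7 1:-1.0 2:0.0 3:0.5 4:1.0"
    label, *values = line.strip().split(" ")
    pixels = torch.tensor([float(v.split(":")[1]) for v in values]).add_(1).div_(2)
    print(int(label) - 1)   # zero-based label -> 6
    print(pixels.tolist())  # [0.0, 0.5, 0.75, 1.0]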
|
from typing import Dict
from jina import Flow, DocumentArray, Document, Executor, Client, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: 'DocumentArray', parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is not certain that this behavior is intended, but it is a good way of documenting it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker, port_generator):
exposed_port = port_generator()
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port, return_responses=True).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
|
from typing import Dict
from jina import Flow, DocumentArray, Document, Executor, Client, requests
ORIGINAL_PARAMS = {'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}}
OVERRIDEN_EXECUTOR1_PARAMS = {
'param1': 'changed',
'param2': 60,
'exec_name': {'param1': 'changed'},
}
exposed_port = 12345
class DummyOverrideParams(Executor):
@requests()
def bar(self, docs: 'DocumentArray', parameters: Dict, *args, **kwargs):
for doc in docs:
doc.tags = parameters
class DummyAssertNotOverrideBetweenPodsParams(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
assert parameters == ORIGINAL_PARAMS
parameters['param2'] = 'change_in_pod'
class DummyAssertIfParamsCanBeChangedInsidePods(Executor):
@requests()
def bar(self, parameters: Dict, *args, **kwargs):
        # it is not certain that this behavior is intended, but it is a good way of documenting it
assert parameters == ORIGINAL_PARAMS
def test_override_params(mocker):
f = (
Flow(port=exposed_port)
.add(
uses={'jtype': 'DummyOverrideParams', 'metas': {'name': 'exec_name'}},
)
.add(uses=DummyAssertNotOverrideBetweenPodsParams)
.add(uses=DummyAssertIfParamsCanBeChangedInsidePods)
)
error_mock = mocker.Mock()
with f:
resp = Client(port=exposed_port, return_responses=True).index(
inputs=DocumentArray([Document()]),
parameters={'param1': 50, 'param2': 60, 'exec_name': {'param1': 'changed'}},
on_error=error_mock,
)
error_mock.assert_not_called()
assert len(resp) == 1
assert len(resp[0].docs) == 1
for doc in resp[0].docs:
assert doc.tags == OVERRIDEN_EXECUTOR1_PARAMS
assert doc.tags['param1'] == 'changed'
assert doc.tags['param2'] == 60
assert doc.tags['exec_name']['param1'] == 'changed'
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_wikipedia_frr(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds is not None
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
dataset = None
config_name = None
def test_dataset_info_available(self, dataset, config_name):
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name=config_name,
hash=dataset_module.hash,
)
dataset_info_url = os.path.join(
HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=False), config.DATASET_INFO_FILENAME
).replace(os.sep, "/")
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_wikipedia_frr(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
builder_cls = import_main_class(dataset_module.module_path, dataset=True)
builder_instance: DatasetBuilder = builder_cls(
cache_dir=tmp_dir,
config_name="20220301.frr",
hash=dataset_module.hash,
)
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds is not None
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder.
Args:
        use_box_type (bool): Whether to wrap decoded boxes with the
box type data structure. Defaults to False.
"""
    # The size of the last dimension of the encoded tensor.
encode_size = 4
def __init__(self, use_box_type: bool = False, **kwargs):
self.use_box_type = use_box_type
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder.
Args:
        use_box_type (bool): Whether to wrap decoded boxes with the
boxlist data structure. Defaults to False.
"""
    # The size of the last dimension of the encoded tensor.
encode_size = 4
def __init__(self, use_box_type: bool = False, **kwargs):
self.use_box_type = use_box_type
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
|
from __future__ import annotations
import sys
from .classification import CrossEncoderClassificationEvaluator
from .correlation import CrossEncoderCorrelationEvaluator
from .deprecated import (
CEBinaryAccuracyEvaluator,
CEBinaryClassificationEvaluator,
CECorrelationEvaluator,
CEF1Evaluator,
CERerankingEvaluator,
CESoftmaxAccuracyEvaluator,
)
from .nano_beir import CrossEncoderNanoBEIREvaluator
from .reranking import CrossEncoderRerankingEvaluator
# Ensure that imports using deprecated paths still work
# Although importing via `from sentence_transformers.cross_encoder.evaluation import ...` is recommended
deprecated_modules = [
"sentence_transformers.cross_encoder.evaluation.CEBinaryAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEBinaryClassificationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CEF1Evaluator",
"sentence_transformers.cross_encoder.evaluation.CESoftmaxAccuracyEvaluator",
"sentence_transformers.cross_encoder.evaluation.CECorrelationEvaluator",
"sentence_transformers.cross_encoder.evaluation.CERerankingEvaluator",
]
for module in deprecated_modules:
sys.modules[module] = sys.modules["sentence_transformers.cross_encoder.evaluation.deprecated"]
__all__ = [
"CrossEncoderClassificationEvaluator",
"CrossEncoderCorrelationEvaluator",
"CrossEncoderRerankingEvaluator",
"CrossEncoderNanoBEIREvaluator",
# Deprecated:
"CERerankingEvaluator",
"CECorrelationEvaluator",
"CEBinaryAccuracyEvaluator",
"CEBinaryClassificationEvaluator",
"CEF1Evaluator",
"CESoftmaxAccuracyEvaluator",
]
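# Illustration of the aliasing above (assuming the package is importable): because every
# path in `deprecated_modules` is mapped onto the shared `deprecated` module, an import
# through an old per-evaluator path still resolves, e.g.
#
#   from sentence_transformers.cross_encoder.evaluation.CERerankingEvaluator import (
#       CERerankingEvaluator,
#   )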
|
from __future__ import annotations
# TODO: Consider renaming all evaluators to CrossEncoder..., e.g. CrossEncoderNanoBEIREvaluator, CrossEncoderClassificationEvaluator, etc.
from .CEBinaryAccuracyEvaluator import CEBinaryAccuracyEvaluator
from .CEBinaryClassificationEvaluator import CEBinaryClassificationEvaluator
from .CEClassificationEvaluator import CEClassificationEvaluator
from .CECorrelationEvaluator import CECorrelationEvaluator
from .CEF1Evaluator import CEF1Evaluator
from .CENanoBEIREvaluator import CENanoBEIREvaluator
from .CERerankingEvaluator import CERerankingEvaluator
from .CESoftmaxAccuracyEvaluator import CESoftmaxAccuracyEvaluator
__all__ = [
"CEClassificationEvaluator",
"CECorrelationEvaluator",
"CERerankingEvaluator",
"CENanoBEIREvaluator",
"CEBinaryAccuracyEvaluator", # Deprecated
"CEBinaryClassificationEvaluator", # Deprecated
"CEF1Evaluator", # Deprecated
"CESoftmaxAccuracyEvaluator", # Deprecated
]
|
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 32000,
"open-mistral-nemo-latest": 128000,
"ministral-8b-latest": 128000,
"ministral-3b-latest": 128000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
"mistral-small-latest",
"codestral-latest",
"open-mistral-nemo-latest",
)
MISTRALAI_CODE_MODELS = ("codestral-latest",)
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
        raise ValueError(
            f"Unknown model: {modelname}. Please provide a valid MistralAI model name. "
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
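# --- Usage sketch (illustrative only; no network access involved). The fine-tuned model
# name below is made up; it just shows that the "ft:" prefix falls back to the base model.
if __name__ == "__main__":
    assert mistralai_modelname_to_contextsize("mistral-large-latest") == 32000
    assert mistralai_modelname_to_contextsize("ft:open-mistral-7b:my-org:custom") == 32000
    assert is_mistralai_function_calling_model("mistral-large-latest")
    assert not is_mistralai_function_calling_model("mistral-tiny")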
|
from typing import Dict
MISTRALAI_MODELS: Dict[str, int] = {
"mistral-tiny": 32000,
"mistral-small": 32000,
"mistral-medium": 32000,
"mistral-large": 32000,
"open-mixtral-8x7b": 32000,
"open-mistral-7b": 32000,
"open-mixtral-8x22b": 64000,
"mistral-small-latest": 32000,
"mistral-medium-latest": 32000,
"mistral-large-latest": 32000,
"codestral-latest": 32000,
"open-mistral-nemo-latest": 128000,
"ministral-8b-latest": 128000,
"ministral-3b-latest": 128000,
}
MISTRALAI_FUNCTION_CALLING_MODELS = (
"mistral-large-latest",
"open-mixtral-8x22b",
"ministral-8b-latest",
"ministral-3b-latest",
)
MISTRALAI_CODE_MODELS = ("codestral-latest",)
def mistralai_modelname_to_contextsize(modelname: str) -> int:
# handling finetuned models
if modelname.startswith("ft:"):
modelname = modelname.split(":")[1]
if modelname not in MISTRALAI_MODELS:
        raise ValueError(
            f"Unknown model: {modelname}. Please provide a valid MistralAI model name. "
"Known models are: " + ", ".join(MISTRALAI_MODELS.keys())
)
return MISTRALAI_MODELS[modelname]
def is_mistralai_function_calling_model(modelname: str) -> bool:
return modelname in MISTRALAI_FUNCTION_CALLING_MODELS
def is_mistralai_code_model(modelname: str) -> bool:
return modelname in MISTRALAI_CODE_MODELS
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClearMLCallbackHandler",
]
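# --- Standalone sketch (not the actual langchain internals; runs only when executed
# directly): the lazy module-attribute pattern (PEP 562) that the hook above relies on.
# The "lazy_demo" module and its single attribute are dummies invented for illustration.
if __name__ == "__main__":
    import sys
    import types
    lazy = types.ModuleType("lazy_demo")
    def _lazy_getattr(name: str) -> Any:
        # Stand-in for the deprecated-lookup logic: resolve the name only on first access.
        if name == "ClearMLCallbackHandler":
            return object()
        raise AttributeError(name)
    lazy.__getattr__ = _lazy_getattr
    sys.modules["lazy_demo"] = lazy
    import lazy_demo
    _ = lazy_demo.ClearMLCallbackHandler  # resolved lazily via the module-level __getattr__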
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.clearml_callback import ClearMLCallbackHandler
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClearMLCallbackHandler": "langchain_community.callbacks.clearml_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"ClearMLCallbackHandler",
]
|
"""Standard LangChain interface tests."""
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
|
"""Standard LangChain interface tests"""
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ( # type: ignore[import-not-found]
ChatModelIntegrationTests, # type: ignore[import-not-found]
)
from langchain_mistralai import ChatMistralAI
class TestMistralStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatMistralAI
@property
def chat_model_params(self) -> dict:
return {"model": "mistral-large-latest", "temperature": 0}
@property
def supports_json_mode(self) -> bool:
return True
|
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.StackedRNNCells")
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Args:
cells: List of RNN cell instances.
Example:
```python
batch_size = 3
sentence_length = 5
num_features = 2
new_shape = (batch_size, sentence_length, num_features)
x = np.reshape(np.arange(30), new_shape)
rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = keras.layers.StackedRNNCells(rnn_cells)
lstm_layer = keras.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
"""
def __init__(self, cells, **kwargs):
super().__init__(**kwargs)
for cell in cells:
if "call" not in dir(cell):
raise ValueError(
"All cells must have a `call` method. "
f"Received cell without a `call` method: {cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"All cells must have a `state_size` attribute. "
f"Received cell without a `state_size`: {cell}"
)
self.cells = cells
@property
def state_size(self):
return [c.state_size for c in self.cells]
@property
def output_size(self):
if getattr(self.cells[-1], "output_size", None) is not None:
return self.cells[-1].output_size
elif isinstance(self.cells[-1].state_size, (list, tuple)):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, batch_size=None):
initial_states = []
for cell in self.cells:
get_initial_state_fn = getattr(cell, "get_initial_state", None)
if get_initial_state_fn:
initial_states.append(
get_initial_state_fn(batch_size=batch_size)
)
else:
if isinstance(cell.state_size, int):
initial_states.append(
ops.zeros(
(batch_size, cell.state_size),
dtype=self.compute_dtype,
)
)
else:
initial_states.append(
[
ops.zeros((batch_size, d), dtype=self.compute_dtype)
for d in cell.state_size
]
)
return initial_states
def call(self, inputs, states, training=False, **kwargs):
# Call the cells in order and store the returned states.
new_states = []
for cell, states in zip(self.cells, states):
state_is_list = tree.is_nested(states)
states = list(states) if tree.is_nested(states) else [states]
if isinstance(cell, Layer) and cell._call_has_training_arg:
kwargs["training"] = training
else:
kwargs.pop("training", None)
cell_call_fn = cell.__call__ if callable(cell) else cell.call
inputs, states = cell_call_fn(inputs, states, **kwargs)
if len(states) == 1 and not state_is_list:
states = states[0]
new_states.append(states)
if len(new_states) == 1:
new_states = new_states[0]
return inputs, new_states
def build(self, input_shape):
for cell in self.cells:
if isinstance(cell, Layer) and not cell.built:
cell.build(input_shape)
cell.built = True
if getattr(cell, "output_size", None) is not None:
output_dim = cell.output_size
elif isinstance(cell.state_size, (list, tuple)):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
batch_size = tree.flatten(input_shape)[0]
input_shape = (batch_size, output_dim)
def get_config(self):
cells = []
for cell in self.cells:
cells.append(serialization_lib.serialize_keras_object(cell))
config = {"cells": cells}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
cells = []
for cell_config in config.pop("cells"):
cells.append(
serialization_lib.deserialize_keras_object(
cell_config, custom_objects=custom_objects
)
)
return cls(cells, **config)
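# --- Hedged usage sketch (runs only when this file is executed directly; assumes a working
# Keras/NumPy install, and the cell sizes and input shape below are arbitrary): a config
# round-trip via get_config/from_config, then plugging the rebuilt stack into keras.layers.RNN.
if __name__ == "__main__":
    import numpy as np
    import keras
    stacked = keras.layers.StackedRNNCells(
        [keras.layers.LSTMCell(8), keras.layers.LSTMCell(4)]
    )
    rebuilt = keras.layers.StackedRNNCells.from_config(stacked.get_config())
    layer = keras.layers.RNN(rebuilt)
    out = layer(np.random.rand(3, 5, 2).astype("float32"))
    assert tuple(out.shape) == (3, 4)  # output width comes from the last cell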
|
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.StackedRNNCells")
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Args:
cells: List of RNN cell instances.
Example:
```python
batch_size = 3
sentence_length = 5
num_features = 2
new_shape = (batch_size, sentence_length, num_features)
x = np.reshape(np.arange(30), new_shape)
rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = keras.layers.StackedRNNCells(rnn_cells)
lstm_layer = keras.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
"""
def __init__(self, cells, **kwargs):
super().__init__(**kwargs)
for cell in cells:
if "call" not in dir(cell):
raise ValueError(
"All cells must have a `call` method. "
f"Received cell without a `call` method: {cell}"
)
if "state_size" not in dir(cell):
raise ValueError(
"All cells must have a `state_size` attribute. "
f"Received cell without a `state_size`: {cell}"
)
self.cells = cells
@property
def state_size(self):
return [c.state_size for c in self.cells]
@property
def output_size(self):
if getattr(self.cells[-1], "output_size", None) is not None:
return self.cells[-1].output_size
elif isinstance(self.cells[-1].state_size, (list, tuple)):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, batch_size=None):
initial_states = []
for cell in self.cells:
get_initial_state_fn = getattr(cell, "get_initial_state", None)
if get_initial_state_fn:
initial_states.append(
get_initial_state_fn(batch_size=batch_size)
)
else:
if isinstance(cell.state_size, int):
initial_states.append(
ops.zeros(
(batch_size, cell.state_size),
dtype=self.compute_dtype,
)
)
else:
initial_states.append(
[
ops.zeros((batch_size, d), dtype=self.compute_dtype)
for d in cell.state_size
]
)
return initial_states
def call(self, inputs, states, training=False, **kwargs):
# Call the cells in order and store the returned states.
new_states = []
for cell, states in zip(self.cells, states):
state_is_list = tree.is_nested(states)
states = list(states) if tree.is_nested(states) else [states]
if isinstance(cell, Layer) and cell._call_has_training_arg:
kwargs["training"] = training
else:
kwargs.pop("training", None)
cell_call_fn = cell.__call__ if callable(cell) else cell.call
inputs, states = cell_call_fn(inputs, states, **kwargs)
if len(states) == 1 and not state_is_list:
states = states[0]
new_states.append(states)
if len(new_states) == 1:
new_states = new_states[0]
return inputs, new_states
def build(self, input_shape):
for cell in self.cells:
if isinstance(cell, Layer) and not cell.built:
cell.build(input_shape)
cell.built = True
if getattr(cell, "output_size", None) is not None:
output_dim = cell.output_size
elif isinstance(cell.state_size, (list, tuple)):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
batch_size = tree.flatten(input_shape)[0]
input_shape = (batch_size, output_dim)
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append(serialization_lib.serialize_keras_object(cell))
config = {"cells": cells}
base_config = super().get_config()
return {**base_config, **config}
@classmethod
def from_config(cls, config, custom_objects=None):
cells = []
for cell_config in config.pop("cells"):
cells.append(
serialization_lib.deserialize_keras_object(
cell_config, custom_objects=custom_objects
)
)
return cls(cells, **config)
|
import os
import pytest
from jina import Client, Document, Executor, Flow, requests
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture()
def flow(request, port_generator):
exposed_port = port_generator()
flow_src = request.param
if flow_src == 'flow-yml':
return Flow.load_config(os.path.join(cur_dir, 'flow.yml'))
elif flow_src == 'uses-yml':
return Flow(port=exposed_port).add(
uses=os.path.join(cur_dir, 'default_config.yml'),
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
)
elif flow_src == 'class':
from .executor import Override
return Flow(port=exposed_port).add(
uses=Override,
uses_with={'param1': 50, 'param2': 30, 'param3': 10},
uses_metas={'workspace': 'different_workspace', 'name': 'name'},
)
@pytest.mark.parametrize('flow', ['flow-yml', 'uses-yml', 'class'], indirect=['flow'])
def test_override_config_params(flow):
with flow:
resps = Client(port=flow.port).search(
inputs=[Document()], return_responses=True
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_config_params_shards(port_generator):
exposed_port = port_generator()
flow = Flow(port=exposed_port).add(
uses=os.path.join(cur_dir, 'default_config.yml'),
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
shards=2,
)
with flow:
resps = Client(port=flow.port).search(
inputs=[Document()], return_responses=True
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_requests(port_generator):
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
for d in docs:
d.text = 'foo'
def bar(self, docs, **kwargs):
for d in docs:
d.text = 'bar'
@requests(on=['/1', '/2'])
def foobar(self, docs, **kwargs):
for d in docs:
d.text = 'foobar'
exposed_port = port_generator()
# original
f = Flow(port=exposed_port).add(uses=MyExec)
with f:
req = Client(port=exposed_port).post(
'/index', Document(), return_responses=True
)
assert req[0].docs[0].text == 'foo'
# change bind to bar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/index': 'bar'})
with f:
req = Client(port=exposed_port).post(
'/index', Document(), return_responses=True
)
assert req[0].docs[0].text == 'bar'
req = Client(port=exposed_port).post('/1', Document(), return_responses=True)
assert req[0].docs[0].text == 'foobar'
# change bind to foobar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/index': 'foobar'})
with f:
req = Client(port=exposed_port).post(
'/index', Document(), return_responses=True
)
assert req[0].docs[0].text == 'foobar'
req = Client(port=exposed_port).post(
'/index-blah', Document(), return_responses=True
)
assert req[0].docs[0].text == 'foo'
    # change default bind to bar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/default': 'bar'})
with f:
req = Client(port=exposed_port).post(
'/index', Document(), return_responses=True
)
assert req[0].docs[0].text == 'bar'
|
import os
import pytest
from jina import Client, Document, Executor, Flow, requests
cur_dir = os.path.dirname(os.path.abspath(__file__))
exposed_port = 12345
@pytest.fixture()
def flow(request):
flow_src = request.param
if flow_src == 'flow-yml':
return Flow.load_config(os.path.join(cur_dir, 'flow.yml'))
elif flow_src == 'uses-yml':
return Flow(port=exposed_port).add(
uses=os.path.join(cur_dir, 'default_config.yml'),
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
)
elif flow_src == 'class':
from .executor import Override
return Flow(port=exposed_port).add(
uses=Override,
uses_with={'param1': 50, 'param2': 30, 'param3': 10},
uses_metas={'workspace': 'different_workspace', 'name': 'name'},
)
@pytest.mark.parametrize('flow', ['flow-yml', 'uses-yml', 'class'], indirect=['flow'])
def test_override_config_params(flow):
with flow:
resps = Client(port=exposed_port, return_responses=True).search(
inputs=[Document()]
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_config_params_shards():
flow = Flow(port=exposed_port).add(
uses=os.path.join(cur_dir, 'default_config.yml'),
uses_with={'param1': 50, 'param2': 30},
uses_metas={'workspace': 'different_workspace'},
shards=2,
)
with flow:
resps = Client(port=exposed_port, return_responses=True).search(
inputs=[Document()]
)
doc = resps[0].docs[0]
assert doc.tags['param1'] == 50
assert doc.tags['param2'] == 30
        assert doc.tags['param3'] == 10  # not overridden
        assert doc.tags['name'] == 'name'  # not overridden
assert doc.tags['workspace'] == 'different_workspace'
def test_override_requests():
class MyExec(Executor):
@requests
def foo(self, docs, **kwargs):
for d in docs:
d.text = 'foo'
def bar(self, docs, **kwargs):
for d in docs:
d.text = 'bar'
@requests(on=['/1', '/2'])
def foobar(self, docs, **kwargs):
for d in docs:
d.text = 'foobar'
# original
f = Flow(port=exposed_port).add(uses=MyExec)
with f:
req = Client(port=exposed_port, return_responses=True).post(
'/index', Document()
)
assert req[0].docs[0].text == 'foo'
# change bind to bar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/index': 'bar'})
with f:
req = Client(port=exposed_port, return_responses=True).post(
'/index', Document()
)
assert req[0].docs[0].text == 'bar'
req = Client(port=exposed_port, return_responses=True).post('/1', Document())
assert req[0].docs[0].text == 'foobar'
# change bind to foobar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/index': 'foobar'})
with f:
req = Client(port=exposed_port, return_responses=True).post(
'/index', Document()
)
assert req[0].docs[0].text == 'foobar'
req = Client(port=exposed_port, return_responses=True).post(
'/index-blah', Document()
)
assert req[0].docs[0].text == 'foo'
    # change default bind to bar()
f = Flow(port=exposed_port).add(uses=MyExec, uses_requests={'/default': 'bar'})
with f:
req = Client(port=exposed_port, return_responses=True).post(
'/index', Document()
)
assert req[0].docs[0].text == 'bar'
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
    ) -> 'T':
        """Save :attr:`.tensor` into a wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # 1 channel (mono) or 2 channels (stereo), matching the tensor shape.
            f.setnchannels(n_channels)
            # Sample width in bytes (2 by default, i.e. 16-bit samples).
            f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
with wave.open(
self.uri
) as ifile: #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty(
(int(len(audio_normalised) / channels), channels)
)
audio_stereo[:, 0] = audio_normalised[
range(0, len(audio_normalised), 2)
]
audio_stereo[:, 1] = audio_normalised[
range(1, len(audio_normalised), 2)
]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
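# --- Hedged usage sketch (runs only when executed directly; assumes a docarray Document that
# carries this mixin, and the output file name below is made up): a save/load round trip.
if __name__ == '__main__':
    from docarray import Document
    t = np.linspace(0, 1, 44100, dtype=np.float32)
    doc = Document(tensor=0.3 * np.sin(2 * np.pi * 440 * t))  # one second of a mono 440 Hz tone
    doc.save_audio_tensor_to_file('tone.wav')
    reloaded = Document(uri='tone.wav').load_uri_to_audio_tensor()
    assert reloaded.tensor.shape[0] == 44100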
|
import wave
from typing import Union, BinaryIO, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class AudioDataMixin:
"""Provide helper functions for :class:`Document` to support audio data."""
def save_audio_tensor_to_file(
self: 'T',
file: Union[str, BinaryIO],
sample_rate: int = 44100,
sample_width: int = 2,
    ) -> 'T':
        """Save :attr:`.tensor` into a wav file. Mono/stereo is preserved.
:param file: if file is a string, open the file by that name, otherwise treat it as a file-like object.
:param sample_rate: sampling frequency
:param sample_width: sample width in bytes
:return: Document itself after processed
"""
# Convert to (little-endian) 16 bit integers.
max_int16 = 2**15
tensor = (self.tensor * max_int16).astype('<h')
n_channels = 2 if self.tensor.ndim > 1 else 1
with wave.open(file, 'w') as f:
            # 1 channel (mono) or 2 channels (stereo), matching the tensor shape.
            f.setnchannels(n_channels)
            # Sample width in bytes (2 by default, i.e. 16-bit samples).
            f.setsampwidth(sample_width)
f.setframerate(sample_rate)
f.writeframes(tensor.tobytes())
return self
def load_uri_to_audio_tensor(self: 'T') -> 'T':
"""Convert an audio :attr:`.uri` into :attr:`.tensor` inplace
:return: Document itself after processed
"""
ifile = wave.open(
self.uri
) #: note wave is Python built-in module https://docs.python.org/3/library/wave.html
samples = ifile.getnframes()
audio = ifile.readframes(samples)
# Convert buffer to float32 using NumPy
audio_as_np_int16 = np.frombuffer(audio, dtype=np.int16)
audio_as_np_float32 = audio_as_np_int16.astype(np.float32)
# Normalise float32 array so that values are between -1.0 and +1.0
max_int16 = 2**15
audio_normalised = audio_as_np_float32 / max_int16
channels = ifile.getnchannels()
if channels == 2:
# 1 for mono, 2 for stereo
audio_stereo = np.empty((int(len(audio_normalised) / channels), channels))
audio_stereo[:, 0] = audio_normalised[range(0, len(audio_normalised), 2)]
audio_stereo[:, 1] = audio_normalised[range(1, len(audio_normalised), 2)]
self.tensor = audio_stereo
else:
self.tensor = audio_normalised
return self
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from docarray import BaseDocument
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(BaseDocument):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio.abstract_audio_tensor import AbstractAudioTensor
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
@_register_proto(proto_type_name='audio_torch_tensor')
class AudioTorchTensor(AbstractAudioTensor, TorchTensor, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent an audio tensor.
Adds audio-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import Document
from docarray.typing import AudioTorchTensor, AudioUrl
class MyAudioDoc(Document):
title: str
audio_tensor: Optional[AudioTorchTensor]
url: Optional[AudioUrl]
bytes_: Optional[bytes]
doc_1 = MyAudioDoc(
title='my_first_audio_doc',
audio_tensor=torch.randn(size=(1000, 2)),
)
doc_1.audio_tensor.save_to_wav_file(file_path='path/to/file_1.wav')
doc_1.bytes_ = doc_1.audio_tensor.to_bytes()
doc_2 = MyAudioDoc(
title='my_second_audio_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.audio_tensor = doc_2.url.load()
doc_2.audio_tensor.save_to_wav_file(file_path='path/to/file_2.wav')
        doc_2.bytes_ = doc_2.audio_tensor.to_bytes()
"""
...
|
from typing import Optional
import pytest
from langchain_cli.constants import (
DEFAULT_GIT_REF,
DEFAULT_GIT_REPO,
DEFAULT_GIT_SUBDIRECTORY,
)
from langchain_cli.utils.git import DependencySource, parse_dependency_string
def _assert_dependency_equals(
dep: DependencySource,
*,
git: Optional[str] = None,
ref: Optional[str] = None,
subdirectory: Optional[str] = None,
event_metadata: Optional[dict] = None,
) -> None:
assert dep["git"] == git
assert dep["ref"] == ref
assert dep["subdirectory"] == subdirectory
if event_metadata is not None:
assert dep["event_metadata"] == event_metadata
def test_dependency_string() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref=None,
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# also support a slash in ssh
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# looks like poetry supports both an @ and a #
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git@develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string("simple-pirate", None, None, None),
git=DEFAULT_GIT_REPO,
subdirectory=f"{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate",
ref=DEFAULT_GIT_REF,
)
def test_dependency_string_both() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@branch#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref="branch",
)
def test_dependency_string_invalids() -> None:
# expect error for wrong order
with pytest.raises(ValueError):
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
None,
None,
None,
)
# expect error for @subdirectory
def test_dependency_string_edge_case() -> None:
# weird unsolvable edge case of
# git+ssh://a@b
# this could be a ssh dep with user=a, and default ref
# or a ssh dep at a with ref=b.
# in this case, assume the first case (be greedy with the '@')
_assert_dependency_equals(
parse_dependency_string("git+ssh://a@b", None, None, None),
git="ssh://a@b",
subdirectory=None,
ref=None,
)
# weird one that is actually valid
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory=None,
ref="subdirectory=src",
)
|
from typing import Dict, Optional
import pytest
from langchain_cli.constants import (
DEFAULT_GIT_REF,
DEFAULT_GIT_REPO,
DEFAULT_GIT_SUBDIRECTORY,
)
from langchain_cli.utils.git import DependencySource, parse_dependency_string
def _assert_dependency_equals(
dep: DependencySource,
*,
git: Optional[str] = None,
ref: Optional[str] = None,
subdirectory: Optional[str] = None,
event_metadata: Optional[Dict] = None,
) -> None:
assert dep["git"] == git
assert dep["ref"] == ref
assert dep["subdirectory"] == subdirectory
if event_metadata is not None:
assert dep["event_metadata"] == event_metadata
def test_dependency_string() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref=None,
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref=None,
)
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# also support a slash in ssh
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com/efriis/myrepo.git#develop", None, None, None
),
git="ssh://git@github.com/efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
# looks like poetry supports both an @ and a #
_assert_dependency_equals(
parse_dependency_string(
"git+ssh://git@github.com:efriis/myrepo.git@develop", None, None, None
),
git="ssh://git@github.com:efriis/myrepo.git",
ref="develop",
subdirectory=None,
)
_assert_dependency_equals(
parse_dependency_string("simple-pirate", None, None, None),
git=DEFAULT_GIT_REPO,
subdirectory=f"{DEFAULT_GIT_SUBDIRECTORY}/simple-pirate",
ref=DEFAULT_GIT_REF,
)
def test_dependency_string_both() -> None:
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@branch#subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory="src",
ref="branch",
)
def test_dependency_string_invalids() -> None:
# expect error for wrong order
with pytest.raises(ValueError):
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git#subdirectory=src@branch",
None,
None,
None,
)
# expect error for @subdirectory
def test_dependency_string_edge_case() -> None:
# weird unsolvable edge case of
# git+ssh://a@b
# this could be a ssh dep with user=a, and default ref
# or a ssh dep at a with ref=b.
# in this case, assume the first case (be greedy with the '@')
_assert_dependency_equals(
parse_dependency_string("git+ssh://a@b", None, None, None),
git="ssh://a@b",
subdirectory=None,
ref=None,
)
# weird one that is actually valid
_assert_dependency_equals(
parse_dependency_string(
"git+https://github.com/efriis/myrepo.git@subdirectory=src",
None,
None,
None,
),
git="https://github.com/efriis/myrepo.git",
subdirectory=None,
ref="subdirectory=src",
)
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
class Pagination(pydantic.BaseModel):
total_items: int = pydantic.Field(
description="Total number of items.", examples=[42]
)
total_pages: int = pydantic.Field(
description="Total number of pages.", examples=[2]
)
    current_page: int = pydantic.Field(
        description="Current page number.", examples=[1]
)
page_size: int = pydantic.Field(
description="Number of items per page.", examples=[25]
)
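# --- Illustrative sketch (runs only when executed directly; assumes the backend package is
# importable, and the item count / page size below are arbitrary): filling in Pagination.
if __name__ == "__main__":
    import math
    total_items, page_size = 42, 25
    pagination = Pagination(
        total_items=total_items,
        total_pages=math.ceil(total_items / page_size),
        current_page=1,
        page_size=page_size,
    )
    assert pagination.total_pages == 2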
|
import enum
from typing import Any, List, Optional, Union
import pydantic
import backend.data.graph
from backend.data.api_key import APIKeyPermission, APIKeyWithoutHash
class Methods(enum.Enum):
SUBSCRIBE = "subscribe"
UNSUBSCRIBE = "unsubscribe"
EXECUTION_EVENT = "execution_event"
ERROR = "error"
HEARTBEAT = "heartbeat"
class WsMessage(pydantic.BaseModel):
method: Methods
data: Optional[Union[dict[str, Any], list[Any], str]] = None
success: bool | None = None
channel: str | None = None
error: str | None = None
class ExecutionSubscription(pydantic.BaseModel):
graph_id: str
class SubscriptionDetails(pydantic.BaseModel):
event_type: str
channel: str
graph_id: str
class CreateGraph(pydantic.BaseModel):
template_id: str | None = None
template_version: int | None = None
graph: backend.data.graph.Graph | None = None
class CreateAPIKeyRequest(pydantic.BaseModel):
name: str
permissions: List[APIKeyPermission]
description: Optional[str] = None
class CreateAPIKeyResponse(pydantic.BaseModel):
api_key: APIKeyWithoutHash
plain_text_key: str
class SetGraphActiveVersion(pydantic.BaseModel):
active_graph_version: int
class UpdatePermissionsRequest(pydantic.BaseModel):
permissions: List[APIKeyPermission]
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS (seems to have no effect?); the EXPORT must still be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.13.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS (seems to have no effect?); the EXPORT must still be done manually before jina starts
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
_set_start_method('fork')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.13.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.13'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set nofile soft limit to at least 4096, useful for running matplotlib/seaborn on
    parallel executing plot generators vs. the Ubuntu default ulimit -n 1024 or OS X El Capitan's 256;
    the temporary setting expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from docarray import Document, DocumentArray
# Client
from jina.clients import Client
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Deployment
from jina.orchestrate.deployments import Deployment
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.gateway import BaseGateway as Gateway
|
from abc import abstractmethod
from typing import Any, Optional, Protocol, Sequence, runtime_checkable
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.llms.gradient_ai import TrainResult
@runtime_checkable
class TrainableLLM(Protocol):
"""Protocol for trainable language models."""
@abstractmethod
def train_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult: ...
@abstractmethod
async def atrain_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult: ...
class Memorize(BaseTool):
"""Tool that trains a language model."""
name: str = "memorize"
description: str = (
"Useful whenever you observed novel information "
"from previous conversation history, "
"i.e., another tool's action outputs or human comments. "
"The action input should include observed information in detail, "
"then the tool will fine-tune yourself to remember it."
)
llm: TrainableLLM = Field()
def _run(
self,
information_to_learn: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
train_result = self.llm.train_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
async def _arun(
self,
information_to_learn: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
train_result = await self.llm.atrain_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
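# --- Hedged sketch (runs only when executed directly): a stub class invented here just to
# satisfy the TrainableLLM protocol and exercise the tool above. Real usage would pass an
# actually trainable LLM (e.g. a Gradient-backed model) instead of this fake.
if __name__ == "__main__":
    class FakeTrainableLLM:
        def train_unsupervised(self, inputs: Sequence[str], **kwargs: Any) -> TrainResult:
            return {"loss": 0.0}
        async def atrain_unsupervised(self, inputs: Sequence[str], **kwargs: Any) -> TrainResult:
            return {"loss": 0.0}
    fake_llm = FakeTrainableLLM()
    assert isinstance(fake_llm, TrainableLLM)  # structural check via runtime_checkable
    tool = Memorize(llm=fake_llm)
    print(tool.run("The capital of France is Paris."))  # Train complete. Loss: 0.0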
|
from abc import abstractmethod
from typing import Any, Optional, Protocol, Sequence, runtime_checkable
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.llms.gradient_ai import TrainResult
@runtime_checkable
class TrainableLLM(Protocol):
"""Protocol for trainable language models."""
@abstractmethod
def train_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult: ...
@abstractmethod
async def atrain_unsupervised(
self,
inputs: Sequence[str],
**kwargs: Any,
) -> TrainResult: ...
class Memorize(BaseTool): # type: ignore[override]
"""Tool that trains a language model."""
name: str = "memorize"
description: str = (
"Useful whenever you observed novel information "
"from previous conversation history, "
"i.e., another tool's action outputs or human comments. "
"The action input should include observed information in detail, "
"then the tool will fine-tune yourself to remember it."
)
llm: TrainableLLM = Field()
def _run(
self,
information_to_learn: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
train_result = self.llm.train_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
async def _arun(
self,
information_to_learn: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
train_result = await self.llm.atrain_unsupervised((information_to_learn,))
return f"Train complete. Loss: {train_result['loss']}"
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import get_installed_path, install_package
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
pretrained (bool): Whether to save pretrained model path. If
``pretrained==True``, the url of pretrained model can be accessed
by ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> # Equivalent to
>>> # cfg = Config.fromfile('/path/to/faster-rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Install package if it's not installed.
install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
                                           '.metafile.yml, please check the '
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Examples:
>>> model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> type(model)
<class 'mmdet.models.detectors.faster_rcnn.FasterRCNN'>
Returns:
nn.Module: Built model.
""" # noqa E301
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
# Hack to use pretrained weights.
# If we do not set _is_init here, Runner will call
# `model.init_weights()` to overwrite the pretrained model.
model._is_init = True
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
import importlib
import os.path as osp
from mmengine.config import Config
from mmengine.config.utils import (_get_cfg_metainfo,
_get_external_cfg_base_path,
_get_package_and_cfg_path)
from mmengine.registry import MODELS, DefaultScope
from mmengine.runner import load_checkpoint
from mmengine.utils import get_installed_path, install_package
def get_config(cfg_path: str, pretrained: bool = False) -> Config:
"""Get config from external package.
Args:
cfg_path (str): External relative config path.
pretrained (bool): Whether to save pretrained model path. If
``pretrained==True``, the url of pretrained model can be accessed
by ``cfg.model_path``. Defaults to False.
Examples:
>>> cfg = get_config('mmdet::faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> # Equivalent to
>>> # cfg = Config.fromfile('/path/to/faster_rcnn_r50_fpn_1x_coco.py')
>>> cfg.model_path
https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
Returns:
Config: A `Config` parsed from external package.
""" # noqa E301
# Get package name and relative config path.
package, cfg_path = _get_package_and_cfg_path(cfg_path)
# Install package if it's not installed.
install_package(package)
package_path = get_installed_path(package)
try:
# Use `cfg_path` to search target config file.
cfg_meta = _get_cfg_metainfo(package_path, cfg_path)
cfg_path = osp.join(package_path, '.mim', cfg_meta['Config'])
cfg = Config.fromfile(cfg_path)
if pretrained:
assert 'Weights' in cfg_meta, ('Cannot find `Weights` in cfg_file'
                                           '.metafile.yml, please check the '
'metafile')
cfg.model_path = cfg_meta['Weights']
except ValueError:
# Since the base config does not contain a metafile, the absolute
# config is `osp.join(package_path, cfg_path_prefix, cfg_name)`
cfg_path = _get_external_cfg_base_path(package_path, cfg_path)
cfg = Config.fromfile(cfg_path)
except Exception as e:
raise e
return cfg
def get_model(cfg_path: str, pretrained: bool = False, **kwargs):
"""Get built model from external package.
Args:
cfg_path (str): External relative config path with prefix
'package::' and without suffix.
pretrained (bool): Whether to load pretrained model. Defaults to False.
kwargs (dict): Default arguments to build model.
Examples:
>>> model = get_model('mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py', pretrained=True)
>>> type(model)
<class 'mmdet.models.detectors.faster_rcnn.FasterRCNN'>
Returns:
nn.Module: Built model.
""" # noqa E301
package = cfg_path.split('::')[0]
with DefaultScope.overwrite_default_scope(package): # type: ignore
cfg = get_config(cfg_path, pretrained)
models_module = importlib.import_module(f'{package}.utils')
models_module.register_all_modules() # type: ignore
model = MODELS.build(cfg.model, default_args=kwargs)
if pretrained:
load_checkpoint(model, cfg.model_path)
# Hack to use pretrained weights.
# If we do not set _is_init here, Runner will call
# `model.init_weights()` to overwrite the pretrained model.
model._is_init = True
return model
|
"""
Initializer script that installs stuff to pip.
"""
from __future__ import annotations
import argparse
import logging
import os
import shutil
import subprocess
import sys
import time
def run_command(
args: list[str],
env: dict[str, str] | None = None,
) -> subprocess.CompletedProcess[str]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, env=env, text=True, encoding="utf-8", check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
def main() -> None:
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-black-binary",
help="do not use pre-compiled binaries from pip for black.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
env: dict[str, str] = {
**os.environ,
"UV_PYTHON": sys.executable,
"UV_PYTHON_DOWNLOADS": "never",
"FORCE_COLOR": "1",
"CLICOLOR_FORCE": "1",
}
uv_index_url = env.get("UV_INDEX_URL", env.get("PIP_EXTRA_INDEX_URL"))
if uv_index_url:
env["UV_INDEX_URL"] = uv_index_url
uv: str | None = shutil.which("uv")
if uv:
pip_args = [uv, "pip", "install"]
elif sys.executable:
pip_args = [sys.executable, "-mpip", "install"]
else:
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
            raise RuntimeError(
                f"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_black_binary and "black" in package_name:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args, env=env)
if __name__ == "__main__":
main()
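# --- Example invocations (the script name and package pins below are hypothetical, shown
# only to illustrate the expected "package==version" argument format):
#   python pip_init.py --dry-run=1 --verbose flake8==6.1.0 black==23.12.1
#       -> prints the pip/uv command that would run, without installing anything
#   python pip_init.py flake8
#       -> raises RuntimeError, because every package must carry an explicit version pin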
|
"""
Initializer script that installs stuff to pip.
"""
from __future__ import annotations
import argparse
import logging
import os
import shutil
import subprocess
import sys
import time
def run_command(args: list[str]) -> subprocess.CompletedProcess[bytes]:
logging.debug("$ %s", " ".join(args))
start_time = time.monotonic()
try:
return subprocess.run(args, check=True)
finally:
end_time = time.monotonic()
logging.debug("took %dms", (end_time - start_time) * 1000)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pip initializer")
parser.add_argument(
"packages",
nargs="+",
help="pip packages to install",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"--dry-run", help="do not install anything, just print what would be done."
)
parser.add_argument(
"--no-black-binary",
help="do not use pre-compiled binaries from pip for black.",
action="store_true",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(threadName)s:%(levelname)s> %(message)s",
level=logging.NOTSET if args.verbose else logging.DEBUG,
stream=sys.stderr,
)
uv_available = (
any(prefix in sys.base_prefix for prefix in ["uv/python", "uv\\python"])
and shutil.which("uv") is not None
)
if uv_available:
pip_args = ["uv", "pip", "install"]
elif sys.executable:
pip_args = [sys.executable, "-mpip", "install"]
else:
pip_args = ["pip3", "install"]
# If we are in a global install, use `--user` to install so that you do not
# need root access in order to initialize linters.
#
# However, `pip install --user` interacts poorly with virtualenvs (see:
# https://bit.ly/3vD4kvl) and conda (see: https://bit.ly/3KG7ZfU). So in
# these cases perform a regular installation.
in_conda = os.environ.get("CONDA_PREFIX") is not None
in_virtualenv = os.environ.get("VIRTUAL_ENV") is not None
if not in_conda and not in_virtualenv:
pip_args.append("--user")
pip_args.extend(args.packages)
for package in args.packages:
package_name, _, version = package.partition("=")
if version == "":
            raise RuntimeError(
                f"Package {package_name} did not have a version specified. "
"Please specify a version to produce a consistent linting experience."
)
if args.no_black_binary and "black" in package_name:
pip_args.append(f"--no-binary={package_name}")
dry_run = args.dry_run == "1"
if dry_run:
print(f"Would have run: {pip_args}")
sys.exit(0)
run_command(pip_args)
|
"""XGBoost Experimental Federated Learning related API."""
import ctypes
from threading import Thread
from typing import Any, Dict, Optional
from .core import _LIB, _check_call, make_jcargs
from .tracker import RabitTracker
class FederatedTracker(RabitTracker):
"""Tracker for federated training.
Parameters
----------
n_workers :
The number of federated workers.
port :
The port to listen on.
secure :
Whether this is a secure instance. If True, then the following arguments for SSL
must be provided.
server_key_path :
Path to the server private key file.
server_cert_path :
Path to the server certificate file.
client_cert_path :
Path to the client certificate file.
"""
def __init__( # pylint: disable=R0913, W0231
self,
n_workers: int,
port: int,
secure: bool,
server_key_path: str = "",
server_cert_path: str = "",
client_cert_path: str = "",
timeout: int = 300,
) -> None:
handle = ctypes.c_void_p()
args = make_jcargs(
n_workers=n_workers,
port=port,
dmlc_communicator="federated",
federated_secure=secure,
server_key_path=server_key_path,
server_cert_path=server_cert_path,
client_cert_path=client_cert_path,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def run_federated_server( # pylint: disable=too-many-arguments
n_workers: int,
port: int,
server_key_path: Optional[str] = None,
server_cert_path: Optional[str] = None,
client_cert_path: Optional[str] = None,
blocking: bool = True,
timeout: int = 300,
) -> Optional[Dict[str, Any]]:
"""See :py:class:`~xgboost.federated.FederatedTracker` for more info.
Parameters
----------
blocking :
Block the server until the training is finished. If set to False, the function
launches an additional thread and returns the worker arguments. The default is
True and a higher level framework is responsible for setting worker parameters.
"""
args: Dict[str, Any] = {"n_workers": n_workers}
secure = all(
path is not None
for path in [server_key_path, server_cert_path, client_cert_path]
)
tracker = FederatedTracker(
n_workers=n_workers, port=port, secure=secure, timeout=timeout
)
tracker.start()
if blocking:
tracker.wait_for()
return None
thread = Thread(target=tracker.wait_for)
thread.daemon = True
thread.start()
args.update(tracker.worker_args())
return args
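# Usage sketch (hypothetical values; requires an XGBoost build with federated
# learning enabled). In non-blocking mode the worker arguments are returned so
# that a higher-level framework can hand them to each federated worker:
#
#     worker_args = run_federated_server(n_workers=2, port=9091, blocking=False)
#     # distribute `worker_args` to each worker's communicator context ...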
|
"""XGBoost Experimental Federated Learning related API."""
import ctypes
from threading import Thread
from typing import Any, Dict, Optional
from .core import _LIB, _check_call, make_jcargs
from .tracker import RabitTracker
class FederatedTracker(RabitTracker):
"""Tracker for federated training.
Parameters
----------
n_workers :
The number of federated workers.
port :
The port to listen on.
secure :
Whether this is a secure instance. If True, then the following arguments for SSL
must be provided.
server_key_path :
Path to the server private key file.
server_cert_path :
Path to the server certificate file.
client_cert_path :
Path to the client certificate file.
"""
def __init__( # pylint: disable=R0913, W0231
self,
n_workers: int,
port: int,
secure: bool,
server_key_path: str = "",
server_cert_path: str = "",
client_cert_path: str = "",
timeout: int = 300,
) -> None:
handle = ctypes.c_void_p()
args = make_jcargs(
n_workers=n_workers,
port=port,
dmlc_communicator="federated",
federated_secure=secure,
server_key_path=server_key_path,
server_cert_path=server_cert_path,
client_cert_path=client_cert_path,
timeout=int(timeout),
)
_check_call(_LIB.XGTrackerCreate(args, ctypes.byref(handle)))
self.handle = handle
def run_federated_server( # pylint: disable=too-many-arguments
n_workers: int,
port: int,
server_key_path: Optional[str] = None,
server_cert_path: Optional[str] = None,
client_cert_path: Optional[str] = None,
timeout: int = 300,
) -> Dict[str, Any]:
"""See :py:class:`~xgboost.federated.FederatedTracker` for more info."""
args: Dict[str, Any] = {"n_workers": n_workers}
secure = all(
path is not None
for path in [server_key_path, server_cert_path, client_cert_path]
)
tracker = FederatedTracker(
n_workers=n_workers, port=port, secure=secure, timeout=timeout
)
tracker.start()
thread = Thread(target=tracker.wait_for)
thread.daemon = True
thread.start()
args.update(tracker.worker_args())
return args
|
class XYXY:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
class REL_XYXY:
"""REL_XYXY contains axis indices for the REL_XYXY format.
REL_XYXY is like XYXY, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
class CENTER_XYWH:
"""CENTER_XYWH contains axis indices for the CENTER_XYWH format.
All values in the CENTER_XYWH format should be absolute pixel values.
The CENTER_XYWH format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class XYWH:
"""XYWH contains axis indices for the XYWH format.
All values in the XYWH format should be absolute pixel values.
The XYWH format consists of the following required indices:
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class REL_XYWH:
"""REL_XYWH contains axis indices for the XYWH format.
REL_XYXY is like XYWH, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class YXYX:
"""YXYX contains axis indices for the YXYX format.
All values in the YXYX format should be absolute pixel values.
The YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
class REL_YXYX:
"""REL_YXYX contains axis indices for the REL_YXYX format.
REL_YXYX is like YXYX, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
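# A minimal, hypothetical helper showing how the axis-index classes above are
# meant to be used; the function name and the plain-list representation are
# illustrative only.
def xyxy_to_xywh(box):
    """Convert a single [left, top, right, bottom] box to [x, y, width, height]."""
    out = [0.0] * 4
    out[XYWH.X] = box[XYXY.LEFT]
    out[XYWH.Y] = box[XYXY.TOP]
    out[XYWH.WIDTH] = box[XYXY.RIGHT] - box[XYXY.LEFT]
    out[XYWH.HEIGHT] = box[XYXY.BOTTOM] - box[XYXY.TOP]
    return out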
|
class XYXY:
"""XYXY contains axis indices for the XYXY format.
All values in the XYXY format should be absolute pixel values.
The XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
class REL_XYXY:
"""REL_XYXY contains axis indices for the REL_XYXY format.
REL_XYXY is like XYXY, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_XYXY format consists of the following required indices:
- LEFT: left of the bounding box
- TOP: top of the bounding box
- RIGHT: right of the bounding box
- BOTTOM: bottom of the bounding box
"""
LEFT = 0
TOP = 1
RIGHT = 2
BOTTOM = 3
class CENTER_XYWH:
"""CENTER_XYWH contains axis indices for the CENTER_XYWH format.
All values in the CENTER_XYWH format should be absolute pixel values.
The CENTER_XYWH format consists of the following required indices:
- X: X coordinate of the center of the bounding box
- Y: Y coordinate of the center of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class XYWH:
"""XYWH contains axis indices for the XYWH format.
All values in the XYWH format should be absolute pixel values.
The XYWH format consists of the following required indices:
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class REL_XYWH:
"""REL_XYWH contains axis indices for the XYWH format.
REL_XYXY is like XYWH, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
- X: X coordinate of the left of the bounding box
- Y: Y coordinate of the top of the bounding box
- WIDTH: width of the bounding box
- HEIGHT: height of the bounding box
"""
X = 0
Y = 1
WIDTH = 2
HEIGHT = 3
class YXYX:
"""YXYX contains axis indices for the YXYX format.
All values in the YXYX format should be absolute pixel values.
The YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
class REL_YXYX:
"""REL_YXYX contains axis indices for the REL_YXYX format.
REL_YXYX is like YXYX, but each value is relative to the width and height of
the origin image. Values are percentages of the origin images' width and
height respectively.
The REL_YXYX format consists of the following required indices:
- TOP: top of the bounding box
- LEFT: left of the bounding box
- BOTTOM: bottom of the bounding box
- RIGHT: right of the bounding box
"""
TOP = 0
LEFT = 1
BOTTOM = 2
RIGHT = 3
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
list[dict]: The new pipeline list with only keep
loading image and annotations related configuration.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
# TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.pipelines import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def replace_ImageToTensor(pipelines):
"""Replace the ImageToTensor transform in a data pipeline to
DefaultFormatBundle, which is normally useful in batch inference.
Args:
pipelines (list[dict]): Data pipeline configs.
Returns:
list: The new pipeline list with all ImageToTensor replaced by
DefaultFormatBundle.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(
... type='MultiScaleFlipAug',
... img_scale=(1333, 800),
... flip=False,
... transforms=[
... dict(type='Resize', keep_ratio=True),
... dict(type='RandomFlip'),
... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
... dict(type='Pad', size_divisor=32),
... dict(type='ImageToTensor', keys=['img']),
... dict(type='Collect', keys=['img']),
... ])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(
... type='MultiScaleFlipAug',
... img_scale=(1333, 800),
... flip=False,
... transforms=[
... dict(type='Resize', keep_ratio=True),
... dict(type='RandomFlip'),
... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img']),
... ])
... ]
>>> assert expected_pipelines == replace_ImageToTensor(pipelines)
"""
pipelines = copy.deepcopy(pipelines)
for i, pipeline in enumerate(pipelines):
if pipeline['type'] == 'MultiScaleFlipAug':
assert 'transforms' in pipeline
pipeline['transforms'] = replace_ImageToTensor(
pipeline['transforms'])
elif pipeline['type'] == 'ImageToTensor':
warnings.warn(
'"ImageToTensor" pipeline is replaced by '
'"DefaultFormatBundle" for batch inference. It is '
'recommended to manually replace it in the test '
'data pipeline in your config file.', UserWarning)
pipelines[i] = {'type': 'DefaultFormatBundle'}
return pipelines
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
list[dict]: The new pipeline list with only keep
loading image and annotations related configuration.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
# TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int32)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
# SOLO head expects multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no mask loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
'there should be no mask loss when there are no true masks')
# When truth is non-empty then both cls and mask loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import ConfigDict
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import SOLOV2Head
from mmdet.structures.mask import BitmapMasks
def _rand_masks(num_items, bboxes, img_w, img_h):
rng = np.random.RandomState(0)
masks = np.zeros((num_items, img_h, img_w))
for i, bbox in enumerate(bboxes):
bbox = bbox.astype(np.int32)
mask = (rng.rand(1, bbox[3] - bbox[1], bbox[2] - bbox[0]) >
0.3).astype(np.int32)
masks[i:i + 1, bbox[1]:bbox[3], bbox[0]:bbox[2]] = mask
return BitmapMasks(masks, height=img_h, width=img_w)
def _fake_mask_feature_head():
mask_feature_head = ConfigDict(
feat_channels=128,
start_level=0,
end_level=3,
out_channels=256,
mask_stride=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
return mask_feature_head
class TestSOLOv2Head(TestCase):
def test_solov2_head_loss(self):
"""Tests mask head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}]
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
# SOLO head expects multiple levels of features per image
feats = []
for i in range(len(mask_head.strides)):
feats.append(
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))
feats = tuple(feats)
mask_outs = mask_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty(0, 4)
gt_instances.labels = torch.LongTensor([])
gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)
empty_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no mask loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_mask_loss = empty_gt_losses['loss_mask']
self.assertGreater(empty_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertEqual(
empty_mask_loss.item(), 0,
'there should be no mask loss when there are no true masks')
# When truth is non-empty then both cls and mask loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)
one_gt_losses = mask_head.loss_by_feat(
*mask_outs,
batch_gt_instances=[gt_instances],
batch_img_metas=img_metas)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_mask_loss = one_gt_losses['loss_mask']
self.assertGreater(onegt_cls_loss.item(), 0,
'cls loss should be non-zero')
self.assertGreater(onegt_mask_loss.item(), 0,
'mask loss should be non-zero')
def test_solov2_head_empty_result(self):
s = 256
img_metas = {
'img_shape': (s, s, 3),
'ori_shape': (s, s, 3),
'scale_factor': 1,
'batch_input_shape': (s, s, 3)
}
mask_feature_head = _fake_mask_feature_head()
mask_head = SOLOV2Head(
num_classes=4, in_channels=1, mask_feature_head=mask_feature_head)
kernel_preds = torch.empty(0, 128)
cls_scores = torch.empty(0, 80)
mask_feats = torch.empty(0, 16, 16)
test_cfg = ConfigDict(
score_thr=0.1,
mask_thr=0.5,
)
results = mask_head._predict_by_feat_single(
kernel_preds=kernel_preds,
cls_scores=cls_scores,
mask_feats=mask_feats,
img_meta=img_metas,
cfg=test_cfg)
self.assertIsInstance(results, InstanceData)
self.assertEqual(len(results), 0)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from ...video_torch_encoder import (
ConvertFCHWtoCFHW,
ConvertFHWCtoFCHW,
VideoTorchEncoder,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=False, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False, download_progress=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(
model_name=model_name, use_default_preprocessing=True, download_progress=False
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(
use_default_preprocessing=True,
model_name=model_name,
download_progress=False,
)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True, progress=False).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
@pytest.mark.gpu
def test_video_torch_encoder_use_default_preprocessing_gpu(model_name):
ex = VideoTorchEncoder(
model_name=model_name,
use_default_preprocessing=True,
device='cuda',
download_progress=False,
)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
assert ex.device == 'cuda'
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
import numpy as np
import pytest
import torch
import torchvision.models.video as models
from jina import Document, DocumentArray, Executor
from torchvision import transforms
from ...video_torch_encoder import (
ConvertFCHWtoCFHW,
ConvertFHWCtoFCHW,
VideoTorchEncoder,
)
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=False)
da = DocumentArray(
[Document(blob=np.random.random((3, 2, 224, 224))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('batch_size', [1, 3, 10])
def test_video_torch_encoder_traversal_paths(batch_size):
ex = VideoTorchEncoder(use_default_preprocessing=False)
def _create_doc_with_video_chunks():
d = Document(blob=np.random.random((3, 2, 112, 112)))
d.chunks = [Document(blob=np.random.random((3, 2, 112, 112))) for _ in range(5)]
return d
da = DocumentArray([_create_doc_with_video_chunks() for _ in range(10)])
ex.encode(da, {'traversal_paths': ['r', 'c'], 'batch_size': batch_size})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
assert len(doc.chunks) == 5
for chunk in doc.chunks:
assert chunk.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['r3d_18', 'mc3_18', 'r2plus1d_18'])
def test_video_torch_encoder_use_default_preprocessing(model_name):
ex = VideoTorchEncoder(model_name=model_name, use_default_preprocessing=True)
da = DocumentArray(
[Document(blob=np.random.random((10, 270, 480, 3))) for _ in range(10)]
)
ex.encode(da, {})
assert len(da) == 10
for doc in da:
assert doc.embedding.shape == (512,)
@pytest.fixture()
def kinects_videos():
from torchvision.datasets import Kinetics400
dataset = Kinetics400(
root=Path(__file__).parents[1] / 'data/kinetics400', frames_per_clip=20
)
return [dataset[0][0], dataset[0][0]]
@pytest.mark.parametrize('model_name', ['mc3_18', 'r2plus1d_18', 'r3d_18'])
def test_with_dataset_video(model_name, kinects_videos):
da = DocumentArray(
[Document(blob=video.detach().numpy()) for video in kinects_videos]
)
ex = VideoTorchEncoder(use_default_preprocessing=True, model_name=model_name)
ex.encode(da, {})
assert len(da) == 2
for doc in da:
assert doc.embedding.shape == (512,)
model = getattr(models, model_name)(pretrained=True).eval()
mean = (0.43216, 0.394666, 0.37645)
std = (0.22803, 0.22145, 0.216989)
resize_size = (128, 171)
crop_size = (112, 112)
t = transforms.Compose(
[
ConvertFHWCtoFCHW(),
transforms.ConvertImageDtype(torch.float32),
transforms.Resize(resize_size),
transforms.Normalize(mean=mean, std=std),
transforms.CenterCrop(crop_size),
ConvertFCHWtoCFHW(),
]
)
tensor = torch.stack([t(video) for video in kinects_videos])
def _get_embeddings(x) -> torch.Tensor:
embeddings = torch.Tensor()
def get_activation(model, model_input, output):
nonlocal embeddings
embeddings = output
handle = model.avgpool.register_forward_hook(get_activation)
model(x)
handle.remove()
return embeddings.flatten(1)
embedding_batch = _get_embeddings(tensor)
for doc, expected_torch_embedding in zip(da, embedding_batch):
np.testing.assert_almost_equal(
doc.embedding, expected_torch_embedding.detach().numpy()
)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
|
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.dataclasses import * # noqa: F403
except ImportError:
from pydantic.dataclasses import * # type: ignore # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""OpenAI Finetuning."""
import logging
import json
import os
import requests
from typing import Any, Optional
from openai import AzureOpenAI as SyncAzureOpenAI
from llama_index.core.llms.llm import LLM
from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler
from llama_index.finetuning import OpenAIFinetuneEngine
from llama_index.llms.azure_openai import AzureOpenAI
logger = logging.getLogger(__name__)
class AzureOpenAIFinetuneEngine(OpenAIFinetuneEngine):
"""AzureOpenAI Finetuning Engine."""
def __init__(
self,
base_model: str,
data_path: str,
verbose: bool = False,
start_job_id: Optional[str] = None,
validate_json: bool = True,
) -> None:
"""Init params."""
self.base_model = base_model
self.data_path = data_path
self._verbose = verbose
self._validate_json = validate_json
self._start_job: Optional[Any] = None
self._client = SyncAzureOpenAI(
azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
api_key=os.getenv("AZURE_OPENAI_API_KEY", None),
api_version=os.getenv("OPENAI_API_VERSION", "2024-02-01"),
)
if start_job_id is not None:
self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)
@classmethod
def from_finetuning_handler(
cls,
finetuning_handler: OpenAIFineTuningHandler,
base_model: str,
data_path: str,
**kwargs: Any,
) -> "AzureOpenAIFinetuneEngine":
"""
Initialize from finetuning handler.
Used to finetune an AzureOpenAI model into another
AzureOpenAI model (e.g. gpt-4o-mini on top of gpt-4o).
"""
finetuning_handler.save_finetuning_events(data_path)
return cls(base_model=base_model, data_path=data_path, **kwargs)
def deploy_finetuned_model(
self,
token: str,
subscription_id: str,
resource_group: str,
resource_name: str,
model_deployment_name: Optional[str] = None,
) -> Any:
"""
Deploy finetuned model.
- token: Azure AD token.
- subscription_id: The subscription ID for the associated Azure OpenAI resource.
- resource_group: The resource group name for your Azure OpenAI resource.
- resource_name: The Azure OpenAI resource name.
- model_deployment_name: Custom deployment name that you will use to reference the model when making inference calls.
"""
current_job = self.get_current_job()
job_id = current_job.id
status = current_job.status
model_id = current_job.fine_tuned_model
if model_id is None:
raise ValueError(
f"Job {job_id} does not have a finetuned model id ready yet."
)
if status != "succeeded":
raise ValueError(f"Job {job_id} has status {status}, cannot get model")
fine_tuned_model = current_job.fine_tuned_model
if model_deployment_name is None:
model_deployment_name = fine_tuned_model
deploy_params = {"api-version": os.getenv("OPENAI_API_VERSION", "2024-02-01")}
deploy_headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json",
}
deploy_data = {
"sku": {"name": "standard", "capacity": 1},
"properties": {
"model": {"format": "OpenAI", "name": fine_tuned_model, "version": "1"}
},
}
deploy_data = json.dumps(deploy_data)
request_url = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.CognitiveServices/accounts/{resource_name}/deployments/{model_deployment_name}"
print("Creating a new deployment...")
response = requests.put(
request_url, params=deploy_params, headers=deploy_headers, data=deploy_data
)
return response.json()
def get_finetuned_model(self, engine: str, **model_kwargs: Any) -> LLM:
"""
Get finetuned model.
- engine: This will correspond to the custom name you chose
for your deployment when you deployed a model.
"""
current_job = self.get_current_job()
return AzureOpenAI(
engine=engine or current_job.fine_tuned_model, **model_kwargs
)
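# Usage sketch (all names below are illustrative; AZURE_OPENAI_ENDPOINT,
# AZURE_OPENAI_API_KEY and OPENAI_API_VERSION must point at your Azure OpenAI
# resource, and finetune() is assumed to be inherited from OpenAIFinetuneEngine):
#
#     engine = AzureOpenAIFinetuneEngine(
#         base_model="gpt-4o-mini", data_path="finetuning_events.jsonl"
#     )
#     engine.finetune()
#     llm = engine.get_finetuned_model(engine="my-deployment-name")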
|
"""OpenAI Finetuning."""
import logging
import json
import os
import requests
from typing import Any, Optional
from openai import AzureOpenAI as SyncAzureOpenAI
from llama_index.core.llms.llm import LLM
from llama_index.finetuning.callbacks.finetuning_handler import OpenAIFineTuningHandler
from llama_index.finetuning import OpenAIFinetuneEngine
from llama_index.llms.azure_openai import AzureOpenAI
logger = logging.getLogger(__name__)
class AzureOpenAIFinetuneEngine(OpenAIFinetuneEngine):
"""AzureOpenAI Finetuning Engine."""
def __init__(
self,
base_model: str,
data_path: str,
verbose: bool = False,
start_job_id: Optional[str] = None,
validate_json: bool = True,
) -> None:
"""Init params."""
self.base_model = base_model
self.data_path = data_path
self._verbose = verbose
self._validate_json = validate_json
self._start_job: Optional[Any] = None
self._client = SyncAzureOpenAI(
azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
api_key=os.getenv("AZURE_OPENAI_API_KEY", None),
api_version=os.getenv("OPENAI_API_VERSION", "2024-02-01"),
)
if start_job_id is not None:
self._start_job = self._client.fine_tuning.jobs.retrieve(start_job_id)
@classmethod
def from_finetuning_handler(
cls,
finetuning_handler: OpenAIFineTuningHandler,
base_model: str,
data_path: str,
**kwargs: Any,
) -> "AzureOpenAIFinetuneEngine":
"""Initialize from finetuning handler.
Used to finetune an AzureOpenAI model into another
AzureOpenAI model (e.g. gpt-4o-mini on top of gpt-4o).
"""
finetuning_handler.save_finetuning_events(data_path)
return cls(base_model=base_model, data_path=data_path, **kwargs)
def deploy_finetuned_model(
self,
token: str,
subscription_id: str,
resource_group: str,
resource_name: str,
model_deployment_name: Optional[str] = None,
) -> Any:
"""Deploy finetuned model.
- token: Azure AD token.
- subscription_id: The subscription ID for the associated Azure OpenAI resource.
- resource_group: The resource group name for your Azure OpenAI resource.
- resource_name: The Azure OpenAI resource name.
- model_deployment_name: Custom deployment name that you will use to reference the model when making inference calls.
"""
current_job = self.get_current_job()
job_id = current_job.id
status = current_job.status
model_id = current_job.fine_tuned_model
if model_id is None:
raise ValueError(
f"Job {job_id} does not have a finetuned model id ready yet."
)
if status != "succeeded":
raise ValueError(f"Job {job_id} has status {status}, cannot get model")
fine_tuned_model = current_job.fine_tuned_model
if model_deployment_name is None:
model_deployment_name = fine_tuned_model
deploy_params = {"api-version": os.getenv("OPENAI_API_VERSION", "2024-02-01")}
deploy_headers = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json",
}
deploy_data = {
"sku": {"name": "standard", "capacity": 1},
"properties": {
"model": {"format": "OpenAI", "name": fine_tuned_model, "version": "1"}
},
}
deploy_data = json.dumps(deploy_data)
request_url = f"https://management.azure.com/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.CognitiveServices/accounts/{resource_name}/deployments/{model_deployment_name}"
print("Creating a new deployment...")
response = requests.put(
request_url, params=deploy_params, headers=deploy_headers, data=deploy_data
)
return response.json()
def get_finetuned_model(self, engine: str, **model_kwargs: Any) -> LLM:
"""Get finetuned model.
- engine: This will correspond to the custom name you chose
for your deployment when you deployed a model.
"""
current_job = self.get_current_job()
return AzureOpenAI(
engine=engine or current_job.fine_tuned_model, **model_kwargs
)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
def test_print_model_summary_custom_build(self):
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(4, activation="relu")
self.dense2 = layers.Dense(2, activation="softmax")
self.unbuilt_dense = layers.Dense(1)
def build(self, input_shape):
self.dense1.build(input_shape)
input_shape = self.dense1.compute_output_shape(input_shape)
self.dense2.build(input_shape)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
model.build((None, 2))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense1
self.assertIn("(None, 2)", summary_content) # dense2
self.assertIn("?", summary_content) # unbuilt_dense
self.assertIn("Total params: 22", summary_content)
self.assertIn("Trainable params: 22", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
def test_print_model_summary_op_as_layer(self):
inputs = layers.Input((2,))
x = layers.Dense(4)(inputs)
outputs = ops.mean(x)
model = models.Model(inputs, outputs)
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(
model, print_fn=print_to_variable, show_trainable=True
)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense
self.assertIn("Y", summary_content) # dense
self.assertIn("()", summary_content) # mean
self.assertIn("-", summary_content) # mean
self.assertIn("Total params: 12", summary_content)
self.assertIn("Trainable params: 12", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
def test_print_model_summary_with_mha(self):
# In Keras <= 3.6, MHA exposes `output_shape` property which breaks this
# test.
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.mha = layers.MultiHeadAttention(2, 2, output_shape=(4,))
def call(self, inputs):
return self.mha(inputs, inputs, inputs)
model = MyModel()
model(np.ones((1, 2, 2)))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(1, 2, 4)", summary_content) # mha
self.assertIn("Total params: 56", summary_content)
self.assertIn("Trainable params: 56", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
|
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.utils import summary_utils
class SummaryUtilsTest(testing.TestCase):
@parameterized.parameters([("adam",), (None,)])
@pytest.mark.requires_trainable_backend
def test_print_model_summary(self, optimizer):
inputs = layers.Input((2,))
outputs = layers.Dense(3)(inputs)
model = models.Model(inputs, outputs)
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])
if optimizer:
# Trigger the optimizer weights creation
model.fit(x=np.zeros([4, 2]), y=np.zeros([4, 3]))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
try:
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
if optimizer:
self.assertIn("Total params: 29", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertIn("Optimizer params: 20", summary_content)
else:
self.assertIn("Total params: 9", summary_content)
self.assertIn("Trainable params: 9", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
self.assertNotIn("Optimizer params", summary_content)
except ImportError:
pass
def test_print_model_summary_custom_build(self):
class MyModel(models.Model):
def __init__(self):
super().__init__()
self.dense1 = layers.Dense(4, activation="relu")
self.dense2 = layers.Dense(2, activation="softmax")
self.unbuilt_dense = layers.Dense(1)
def build(self, input_shape):
self.dense1.build(input_shape)
input_shape = self.dense1.compute_output_shape(input_shape)
self.dense2.build(input_shape)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
model.build((None, 2))
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(model, print_fn=print_to_variable)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense1
self.assertIn("(None, 2)", summary_content) # dense2
self.assertIn("?", summary_content) # unbuilt_dense
self.assertIn("Total params: 22", summary_content)
self.assertIn("Trainable params: 22", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
def test_print_model_summary_op_as_layer(self):
inputs = layers.Input((2,))
x = layers.Dense(4)(inputs)
outputs = ops.mean(x)
model = models.Model(inputs, outputs)
summary_content = []
def print_to_variable(text, line_break=False):
summary_content.append(text)
summary_utils.print_summary(
model, print_fn=print_to_variable, show_trainable=True
)
summary_content = "\n".join(summary_content)
self.assertIn("(None, 4)", summary_content) # dense
self.assertIn("Y", summary_content) # dense
self.assertIn("()", summary_content) # mean
self.assertIn("-", summary_content) # mean
self.assertIn("Total params: 12", summary_content)
self.assertIn("Trainable params: 12", summary_content)
self.assertIn("Non-trainable params: 0", summary_content)
|
# pants requires this import to recognize the dep
import pytest_asyncio # noqa: F401
import pytest
import os
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
# Save the current value of the environment variable, if it exists
val = os.environ.get(var, None)
# Remove the environment variable to simulate it being masked during the test
if val is not None:
del os.environ[var]
try:
# Yield the original value so it can be used in the test
yield val
finally:
# Restore the original environment variable if it was set
if val is not None:
os.environ[var] = val
else:
# If the variable was not originally set, ensure it's removed
if var in os.environ:
del os.environ[var]
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
from llama_index.llms.nvidia import NVIDIA
from llama_index.llms.nvidia.base import DEFAULT_MODEL
mode = get_mode(metafunc.config)
if "chat_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in NVIDIA(**mode).available_models]
metafunc.parametrize("chat_model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
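# Example invocations (the model id and endpoint below are illustrative):
#
#   pytest --all-models                              # parametrize over every available chat model
#   pytest --model-id meta/llama3-8b-instruct        # run against a single model
#   pytest --nim-endpoint http://localhost:8000/v1   # run against a local NIM deployment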
|
import pytest
import os
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
# Save the current value of the environment variable, if it exists
val = os.environ.get(var, None)
# Remove the environment variable to simulate it being masked during the test
if val is not None:
del os.environ[var]
try:
# Yield the original value so it can be used in the test
yield val
finally:
# Restore the original environment variable if it was set
if val is not None:
os.environ[var] = val
else:
# If the variable was not originally set, ensure it's removed
if var in os.environ:
del os.environ[var]
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
from llama_index.llms.nvidia import NVIDIA
from llama_index.llms.nvidia.base import DEFAULT_MODEL
mode = get_mode(metafunc.config)
if "chat_model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in NVIDIA(**mode).available_models]
metafunc.parametrize("chat_model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import sparse_plus
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_shrink
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_sigmoid
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import soft_shrink
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import squareplus
from keras.src.activations.activations import tanh
from keras.src.activations.activations import tanh_shrink
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
|
"""
Python polyfills for sys
"""
from __future__ import annotations
import sys
from ..decorators import substitute_in_graph
__all__ = [
"intern",
"getrecursionlimit",
]
@substitute_in_graph(sys.intern, can_constant_fold_through=True)
def intern(string: str, /) -> str:
return string
@substitute_in_graph(sys.getrecursionlimit, can_constant_fold_through=True)
def getrecursionlimit() -> int:
return sys.getrecursionlimit()
@substitute_in_graph(sys.get_int_max_str_digits, can_constant_fold_through=True)
def get_int_max_str_digits() -> int:
return sys.get_int_max_str_digits()
|
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import datetime
import json
import os
from collections import Counter
from copy import deepcopy
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
with open("ci_results_run_models_gpu/model_job_links.json") as fp:
model_job_links = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
                # prepare the job link and add it to each entry of new failed-test information.
                # device keys end in `-gpu` (e.g. `single-gpu`), so strip that suffix to match the keys in `model_job_links`.
job_link = model_job_links[model][device.replace("-gpu", "")]
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
for x in failed_tests:
x.update({"job_link": job_link})
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
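# Editor's illustration (hedged, not part of the original script): the grouping
# step above on a tiny, made-up sample. The field names ("author", "merged_by")
# mirror the ones this script reads; the data itself is hypothetical.
def _group_failures_by_author_example() -> dict:
    from collections import Counter as _Counter
    sample = {
        "vit": {"single-gpu": [{"author": "alice", "merged_by": "ydshieh"}]},
        "bert": {"multi-gpu": [{"author": "ydshieh", "merged_by": "ydshieh"}]},
    }
    team = {"ydshieh"}
    counts: dict = {}
    for model, per_device in sample.items():
        for _device, failures in per_device.items():
            for failure in failures:
                author = failure["author"] if failure["author"] in team else failure["merged_by"]
                counts.setdefault(author, _Counter()).update([model])
    # counts == {"ydshieh": Counter({"vit": 1, "bert": 1})}
    return {author: dict(counter) for author, counter in counts.items()}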
|
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import datetime
import json
import os
from collections import Counter
from copy import deepcopy
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image
from keras._tf_keras.keras.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras.preprocessing import image
from keras._tf_keras.keras.preprocessing import sequence
from keras._tf_keras.keras.preprocessing import text
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.preprocessing import image
from keras.api.preprocessing import sequence
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""Memory used to save agent output AND intermediate steps."""
from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.agents.format_scratchpad import (
format_to_openai_function_messages,
format_to_tool_messages,
)
from langchain.memory.chat_memory import BaseChatMemory
class AgentTokenBufferMemory(BaseChatMemory): # type: ignore[override]
"""Memory used to save agent output AND intermediate steps.
Parameters:
human_prefix: Prefix for human messages. Default is "Human".
ai_prefix: Prefix for AI messages. Default is "AI".
llm: Language model.
memory_key: Key to save memory under. Default is "history".
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned. Default is 12000.
return_messages: Whether to return messages. Default is True.
output_key: Key to save output under. Default is "output".
intermediate_steps_key: Key to save intermediate steps under.
Default is "intermediate_steps".
        format_as_tools: Whether to format intermediate steps as tool
            messages rather than OpenAI function messages. Default is False.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 12000
"""The max number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest messages will be pruned."""
return_messages: bool = True
output_key: str = "output"
intermediate_steps_key: str = "intermediate_steps"
format_as_tools: bool = False
@property
def buffer(self) -> list[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> list[str]:
"""Always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(
self.buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
format_to_messages = (
format_to_tool_messages
if self.format_as_tools
else format_to_openai_function_messages
)
steps = format_to_messages(outputs[self.intermediate_steps_key])
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_ai_message(output_str)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
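# Editor's sketch (hedged, not part of the original module): the pruning policy
# used in save_context above, shown standalone. The whitespace token counter is
# an assumption standing in for llm.get_num_tokens_from_messages; the point is
# the drop-oldest loop.
def _count_tokens(messages: list[str]) -> int:
    # naive stand-in for a real tokenizer, for illustration only
    return sum(len(message.split()) for message in messages)
def _prune_oldest(messages: list[str], max_tokens: int) -> list[str]:
    # same policy as above: drop the oldest message until the buffer fits
    while messages and _count_tokens(messages) > max_tokens:
        messages.pop(0)
    return messages
# _prune_oldest(["hello world", "a much longer message here", "ok"], max_tokens=6)
# keeps ["a much longer message here", "ok"]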
|
"""Memory used to save agent output AND intermediate steps."""
from typing import Any, Dict, List
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain.agents.format_scratchpad import (
format_to_openai_function_messages,
format_to_tool_messages,
)
from langchain.memory.chat_memory import BaseChatMemory
class AgentTokenBufferMemory(BaseChatMemory): # type: ignore[override]
"""Memory used to save agent output AND intermediate steps.
Parameters:
human_prefix: Prefix for human messages. Default is "Human".
ai_prefix: Prefix for AI messages. Default is "AI".
llm: Language model.
memory_key: Key to save memory under. Default is "history".
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned. Default is 12000.
return_messages: Whether to return messages. Default is True.
output_key: Key to save output under. Default is "output".
intermediate_steps_key: Key to save intermediate steps under.
Default is "intermediate_steps".
        format_as_tools: Whether to format intermediate steps as tool
            messages rather than OpenAI function messages. Default is False.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 12000
"""The max number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest messages will be pruned."""
return_messages: bool = True
output_key: str = "output"
intermediate_steps_key: str = "intermediate_steps"
format_as_tools: bool = False
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(
self.buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_user_message(input_str)
format_to_messages = (
format_to_tool_messages
if self.format_as_tools
else format_to_openai_function_messages
)
steps = format_to_messages(outputs[self.intermediate_steps_key])
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_ai_message(output_str)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.block import get_block
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.executor.utils import UsageTransactionMetadata, block_usage_cost
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
async def spend_credits(entry: NodeExecutionEntry) -> int:
block = get_block(entry.block_id)
if not block:
raise RuntimeError(f"Block {entry.block_id} not found")
cost, matching_filter = block_usage_cost(block=block, input_data=entry.data)
await user_credit.spend_credits(
entry.user_id,
cost,
UsageTransactionMetadata(
graph_exec_id=entry.graph_exec_id,
graph_id=entry.graph_id,
node_id=entry.node_id,
node_exec_id=entry.node_exec_id,
block_id=entry.block_id,
block=entry.block_id,
input=matching_filter,
),
)
return cost
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
)
assert spending_amount_1 > 0
spending_amount_2 = await spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar month to month 2 while keeping the rest of the timestamp at the current time
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime, timezone
import pytest
from prisma.enums import CreditTransactionType
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import BetaUserCredit
from backend.data.execution import NodeExecutionEntry
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = BetaUserCredit(REFILL_VALUE)
async def disable_test_user_transactions():
await CreditTransaction.prisma().delete_many(where={"userId": DEFAULT_USER_ID})
async def top_up(amount: int):
await user_credit._add_transaction(
DEFAULT_USER_ID,
amount,
CreditTransactionType.TOP_UP,
)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
await disable_test_user_transactions()
await top_up(100)
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
),
0.0,
0.0,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
NodeExecutionEntry(
user_id=DEFAULT_USER_ID,
graph_id="test_graph",
node_id="test_node",
graph_exec_id="test_graph_exec",
node_exec_id="test_node_exec",
block_id=AITextGeneratorBlock().id,
data={"model": "gpt-4-turbo", "api_key": "owned_api_key"},
),
0.0,
0.0,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
await disable_test_user_transactions()
current_credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
new_credit = await user_credit.get_credits(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
await disable_test_user_transactions()
month1 = 1
month2 = 2
    # set the calendar month to month 2 while keeping the rest of the timestamp at the current time
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
month2credit = await user_credit.get_credits(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month1, day=1
)
month1credit = await user_credit.get_credits(DEFAULT_USER_ID)
await top_up(100)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: datetime.now(timezone.utc).replace(
month=month2, day=1
)
assert await user_credit.get_credits(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
await disable_test_user_transactions()
balance = await user_credit.get_credits(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
import os
from jina import Flow, DocumentArray, Document
# noinspection PyUnresolvedReferences
from jinahub.indexers.DocCache.doc_cache import DocCache
from jinahub.indexers.storage.LMDBStorage.lmdb_storage import LMDBStorage
def test_cache(tmpdir):
os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache')
os.environ['STORAGE_WORKSPACE'] = os.path.join(tmpdir, 'indexer')
docs = [
Document(id=1, content='a'),
Document(id=2, content='a'),
Document(id=3, content='a'),
]
with Flow().add(uses='cache.yml').add(uses='storage.yml') as f:
response = f.post(on='/index', inputs=DocumentArray(docs), return_results=True)
assert len(response[0].docs) == 1
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 1
docs = [
Document(id=1, content='b'),
Document(id=2, content='b'),
Document(id=3, content='b'),
]
f.post(
on='/update',
inputs=DocumentArray(docs),
)
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 1
f.post(
on='/delete',
inputs=DocumentArray(docs),
)
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 0
|
import os
from jina import Flow, DocumentArray, Document
# noinspection PyUnresolvedReferences
from jinahub.indexers.DocCache import DocCache
from jinahub.indexers.storage.LMDBStorage import LMDBStorage
def test_cache(tmpdir):
os.environ['CACHE_WORKSPACE'] = os.path.join(tmpdir, 'cache')
os.environ['STORAGE_WORKSPACE'] = os.path.join(tmpdir, 'indexer')
docs = [
Document(id=1, content='a'),
Document(id=2, content='a'),
Document(id=3, content='a'),
]
with Flow().add(uses='cache.yml').add(uses='storage.yml') as f:
response = f.post(on='/index', inputs=DocumentArray(docs), return_results=True)
assert len(response[0].docs) == 1
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 1
docs = [
Document(id=1, content='b'),
Document(id=2, content='b'),
Document(id=3, content='b'),
]
f.post(
on='/update',
inputs=DocumentArray(docs),
)
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 1
f.post(
on='/delete',
inputs=DocumentArray(docs),
)
storage = LMDBStorage(
metas={'workspace': os.environ['STORAGE_WORKSPACE'], 'name': 'storage'},
runtime_args={'pea_id': 0},
)
assert storage.size == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.config import ConfigDict
from mmdet.core.utils import OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: ConfigDict,
rpn_head: ConfigDict,
roi_head: ConfigDict,
train_cfg: ConfigDict,
test_cfg: ConfigDict,
neck: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone: Union[ConfigDict, dict],
rpn_head: Union[ConfigDict, dict],
roi_head: Union[ConfigDict, dict],
train_cfg: Union[ConfigDict, dict],
test_cfg: Union[ConfigDict, dict],
neck: Optional[Union[ConfigDict, dict]] = None,
pretrained: Optional[str] = None,
preprocess_cfg: Optional[Union[ConfigDict, dict]] = None,
init_cfg: Optional[Union[ConfigDict, dict]] = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
preprocess_cfg=preprocess_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .layers import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
from .task_modules import * # noqa: F401,F403
from .test_time_augs import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .data_preprocessors import * # noqa: F401,F403
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .plugins import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
from .seg_heads import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
|
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
# learning policy
max_epochs = 24
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=1)
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = [
'../common/mstrain-poly_3x_coco_instance.py',
'../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
backbone=dict(
depth=101,
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='range',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; setting it here seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.26.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn with
    parallel plot generators, versus the Ubuntu default of ulimit -n 1024 or OS X El Capitan's 256.
    This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; setting it here seems to have no effect, so the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.26.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, which is useful for running matplotlib/seaborn with
    parallel plot generators, versus the Ubuntu default of ulimit -n 1024 or OS X El Capitan's 256.
    This is a temporary setting that expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
gen_kwargs: Optional[dict] = None,
**kwargs,
):
super().__init__(
features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
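# Editor's usage sketch (hedged, not part of the module): reading a small
# in-memory dataset through the stream defined above. The generator and
# gen_kwargs shapes mirror the constructor arguments; the commented call needs
# the full `datasets` runtime to actually execute.
def _example_generator(n: int = 3):
    for i in range(n):
        yield {"id": i, "text": f"example-{i}"}
# stream = GeneratorDatasetInputStream(generator=_example_generator, gen_kwargs={"n": 5})
# dataset = stream.read()  # map-style Dataset with a single "train" split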
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
def __init__(
self,
generator: Callable,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
gen_kwargs: Optional[dict] = None,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Generator(
cache_dir=cache_dir,
features=features,
generator=generator,
gen_kwargs=gen_kwargs,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
ignore_verifications = False
use_auth_token = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
ignore_verifications=ignore_verifications,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
use_auth_token=use_auth_token,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", ignore_verifications=ignore_verifications, in_memory=self.keep_in_memory
)
return dataset
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore[assignment,misc]
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore[valid-type]
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
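# Editor's illustration (hedged, not part of the module): what the `_diff` hook
# above yields between two consecutive partial parses when streaming with
# `diff=True`. The concrete objects are made up; `jsonpatch` is imported above.
def _diff_example() -> list:
    previous_chunk = {"name": "Al"}
    next_chunk = {"name": "Alice", "age": 30}
    # returns JSONPatch operations such as a "replace" on /name and an "add" on /age
    return jsonpatch.make_patch(previous_chunk, next_chunk).patch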
|
"""Parser for JSON output."""
from __future__ import annotations
import json
from json import JSONDecodeError
from typing import Annotated, Any, Optional, TypeVar, Union
import jsonpatch # type: ignore[import]
import pydantic
from pydantic import SkipValidation
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers.format_instructions import JSON_FORMAT_INSTRUCTIONS
from langchain_core.output_parsers.transform import BaseCumulativeTransformOutputParser
from langchain_core.outputs import Generation
from langchain_core.utils.json import (
parse_and_check_json_markdown,
parse_json_markdown,
parse_partial_json,
)
from langchain_core.utils.pydantic import IS_PYDANTIC_V1
if IS_PYDANTIC_V1:
PydanticBaseModel = pydantic.BaseModel
else:
from pydantic.v1 import BaseModel
# Union type needs to be last assignment to PydanticBaseModel to make mypy happy.
PydanticBaseModel = Union[BaseModel, pydantic.BaseModel] # type: ignore
TBaseModel = TypeVar("TBaseModel", bound=PydanticBaseModel)
class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
"""Parse the output of an LLM call to a JSON object.
When used in streaming mode, it will yield partial JSON objects containing
all the keys that have been returned so far.
In streaming, if `diff` is set to `True`, yields JSONPatch operations
describing the difference between the previous and the current object.
"""
pydantic_object: Annotated[Optional[type[TBaseModel]], SkipValidation()] = None # type: ignore
"""The Pydantic object to use for validation.
If None, no validation is performed."""
def _diff(self, prev: Optional[Any], next: Any) -> Any:
return jsonpatch.make_patch(prev, next).patch
def _get_schema(self, pydantic_object: type[TBaseModel]) -> dict[str, Any]:
if issubclass(pydantic_object, pydantic.BaseModel):
return pydantic_object.model_json_schema()
if issubclass(pydantic_object, pydantic.v1.BaseModel):
return pydantic_object.schema()
return None
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
If True, the output will be a JSON object containing
all the keys that have been returned so far.
If False, the output will be the full JSON object.
Default is False.
Returns:
The parsed JSON object.
Raises:
OutputParserException: If the output is not valid JSON.
"""
text = result[0].text
text = text.strip()
if partial:
try:
return parse_json_markdown(text)
except JSONDecodeError:
return None
else:
try:
return parse_json_markdown(text)
except JSONDecodeError as e:
msg = f"Invalid json output: {text}"
raise OutputParserException(msg, llm_output=text) from e
def parse(self, text: str) -> Any:
"""Parse the output of an LLM call to a JSON object.
Args:
text: The output of the LLM call.
Returns:
The parsed JSON object.
"""
return self.parse_result([Generation(text=text)])
def get_format_instructions(self) -> str:
"""Return the format instructions for the JSON output.
Returns:
The format instructions for the JSON output.
"""
if self.pydantic_object is None:
return "Return a JSON object."
# Copy schema to avoid altering original Pydantic schema.
schema = dict(self._get_schema(self.pydantic_object).items())
# Remove extraneous fields.
reduced_schema = schema
if "title" in reduced_schema:
del reduced_schema["title"]
if "type" in reduced_schema:
del reduced_schema["type"]
# Ensure json in context is well-formed with double quotes.
schema_str = json.dumps(reduced_schema, ensure_ascii=False)
return JSON_FORMAT_INSTRUCTIONS.format(schema=schema_str)
@property
def _type(self) -> str:
return "simple_json_output_parser"
# For backwards compatibility
SimpleJsonOutputParser = JsonOutputParser
__all__ = [
"JsonOutputParser",
"SimpleJsonOutputParser", # For backwards compatibility
"parse_partial_json", # For backwards compatibility
"parse_and_check_json_markdown", # For backwards compatibility
]
|
from typing import Any, Dict, Union
from torchvision import datapoints
from torchvision.transforms.v2 import functional as F, Transform
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. v2betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
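# Editor's sketch (hedged, not part of the module): the arithmetic behind a
# "CXCYWH" -> "XYXY" conversion, written with plain Python numbers. The actual
# transform above delegates to F.convert_format_bounding_box.
def _cxcywh_to_xyxy(box: "tuple[float, float, float, float]") -> "tuple[float, float, float, float]":
    # (center_x, center_y, width, height) -> (x_min, y_min, x_max, y_max)
    cx, cy, w, h = box
    return (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)
# _cxcywh_to_xyxy((50.0, 50.0, 20.0, 10.0)) == (40.0, 45.0, 60.0, 55.0)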
|
from typing import Any, Dict, Union
import torch
from torchvision import datapoints, transforms as _transforms
from torchvision.transforms.v2 import functional as F, Transform
from .utils import is_simple_tensor
class ConvertBoundingBoxFormat(Transform):
"""[BETA] Convert bounding box coordinates to the given ``format``, eg from "CXCYWH" to "XYXY".
.. v2betastatus:: ConvertBoundingBoxFormat transform
Args:
format (str or datapoints.BoundingBoxFormat): output bounding box format.
Possible values are defined by :class:`~torchvision.datapoints.BoundingBoxFormat` and
string values match the enums, e.g. "XYXY" or "XYWH" etc.
"""
_transformed_types = (datapoints.BoundingBox,)
def __init__(self, format: Union[str, datapoints.BoundingBoxFormat]) -> None:
super().__init__()
if isinstance(format, str):
format = datapoints.BoundingBoxFormat[format]
self.format = format
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.convert_format_bounding_box(inpt, new_format=self.format) # type: ignore[return-value]
class ConvertImageDtype(Transform):
"""[BETA] Convert input image to the given ``dtype`` and scale the values accordingly.
.. v2betastatus:: ConvertImageDtype transform
.. warning::
Consider using ``ToDtype(dtype, scale=True)`` instead. See :class:`~torchvision.transforms.v2.ToDtype`.
This function does not support PIL Image.
Args:
dtype (torch.dtype): Desired data type of the output
.. note::
When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
If converted back and forth, this mismatch has no effect.
Raises:
RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
of the integer ``dtype``.
"""
_v1_transform_cls = _transforms.ConvertImageDtype
_transformed_types = (is_simple_tensor, datapoints.Image)
def __init__(self, dtype: torch.dtype = torch.float32) -> None:
super().__init__()
self.dtype = dtype
def _transform(self, inpt: Any, params: Dict[str, Any]) -> Any:
return F.to_dtype(inpt, dtype=self.dtype, scale=True)
class ClampBoundingBox(Transform):
"""[BETA] Clamp bounding boxes to their corresponding image dimensions.
The clamping is done according to the bounding boxes' ``spatial_size`` meta-data.
.. v2betastatus:: ClampBoundingBox transform
"""
_transformed_types = (datapoints.BoundingBox,)
def _transform(self, inpt: datapoints.BoundingBox, params: Dict[str, Any]) -> datapoints.BoundingBox:
return F.clamp_bounding_box(inpt) # type: ignore[return-value]
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
def normalized_mean_squared_error(
reconstruction: torch.Tensor,
original_input: torch.Tensor,
) -> torch.Tensor:
"""
:param reconstruction: output of Autoencoder.decode (shape: [batch, n_inputs])
:param original_input: input of Autoencoder.encode (shape: [batch, n_inputs])
:return: normalized mean squared error (shape: [1])
"""
return (((reconstruction - original_input) ** 2).mean(dim=1) / (original_input**2).mean(dim=1)).mean()
class ReconstructionLoss(nn.Module):
"""
Reconstruction Loss module for Sparse AutoEncoder.
This module computes the reconstruction loss according to the formula:
L_recon = L(k) + L(4k)/8 + β*L_aux
where:
- L(k) = ||f(x) - f(dx)_k||₂²
- L(4k) = ||f(x) - f(dx)_4k||₂²
- L_aux = ||e - ê||₂², e = f(x) - f(dx), ê = W_dec*z
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the Reconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the reconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
x = features["sentence_embedding_backbone"]
recons_k = features["decoded_embedding_k"]
recons_4k = features["decoded_embedding_4k"]
recons_aux = features["decoded_embedding_aux"]
reconsk_pre_bias = features["decoded_embedding_k_pre_bias"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(x, recons_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(x, recons_4k)
# L_aux = ||e - ê||₂²
L_aux = normalized_mean_squared_error(recons_aux, x - reconsk_pre_bias)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
}
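# Editor's numeric check (hedged, not part of the module): making the
# normalized MSE above concrete on tiny tensors.
# With reconstruction [[1, 1]] and original [[2, 2]]:
# per-row MSE = ((1-2)**2 + (1-2)**2) / 2 = 1.0, per-row mean(original**2) = 4.0,
# so the normalized value is 1.0 / 4.0 = 0.25.
def _normalized_mse_example() -> torch.Tensor:
    reconstruction = torch.tensor([[1.0, 1.0]])
    original = torch.tensor([[2.0, 2.0]])
    return normalized_mean_squared_error(reconstruction, original)  # tensor(0.2500)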
|
from __future__ import annotations
from collections.abc import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
from sentence_transformers.sparse_encoder import SparseEncoder
class ReconstructionLoss(nn.Module):
"""
Reconstruction Loss module for Sparse AutoEncoder.
This module computes the reconstruction loss according to the formula:
L_recon = L(k) + L(4k)/8 + β*L_aux
where:
- L(k) = ||f(x) - f(dx)_k||₂²
- L(4k) = ||f(x) - f(dx)_4k||₂²
- L_aux = ||e - ê||₂², e = f(x) - f(dx), ê = W_dec*z
"""
def __init__(self, model: SparseEncoder, beta: float = 1.0) -> None:
super().__init__()
self.model = model
self.beta = beta
    def forward(self, sentence_features: Iterable[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Forward pass of the Reconstruction Loss module.
This method is used when the loss is computed as part of the model's forward pass.
Args:
sentence_features: Iterable of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Compute embeddings using the model
outputs = [self.model(sentence_feature) for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(outputs)
    def compute_loss_from_embeddings(self, outputs: list[dict[str, torch.Tensor]]) -> torch.Tensor:
"""
Compute the reconstruction loss from embeddings.
Args:
outputs: List of dictionaries containing sentence embeddings and their sparse representations
Returns:
            The total reconstruction loss as a scalar tensor
"""
# Initialize loss components
total_L_k = 0.0
total_L_4k = 0.0
total_L_aux = 0.0
# Process each sentence feature
for features in outputs:
f_x = features["sentence_embedding_backbone"]
x_hat_k = features["decoded_embedding_k"]
x_hat_4k = features["decoded_embedding_4k"]
e = features["error"]
e_hat = features["error_hat"]
# L(k) = ||f(x) - f(dx)_k||₂²
L_k = F.mse_loss(f_x, x_hat_k)
# L(4k) = ||f(x) - f(dx)_4k||₂²
L_4k = F.mse_loss(f_x, x_hat_4k)
# L_aux = ||e - ê||₂²
L_aux = F.mse_loss(e, e_hat)
# Accumulate losses
total_L_k += L_k
total_L_4k += L_4k
total_L_aux += L_aux
# Average losses over batch
batch_size = len(outputs)
if batch_size > 0:
total_L_k /= batch_size
total_L_4k /= batch_size
total_L_aux /= batch_size
# Total loss: L_recon = L(k) + L(4k)/8 + β*L_aux
total_loss = total_L_k + total_L_4k / 8 + self.beta * total_L_aux
return total_loss
def get_config_dict(self):
"""
Get the configuration dictionary.
Returns:
Dictionary containing the configuration parameters
"""
return {
"beta": self.beta,
}
|
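The two ReconstructionLoss records above combine three MSE terms as L_recon = L(k) + L(4k)/8 + beta * L_aux. The following standalone sketch is not part of either record; the tensor values are made up purely to show the arithmetic, and only torch is assumed.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
beta = 1.0
f_x = torch.randn(4, 16)                    # backbone sentence embeddings
x_hat_k = f_x + 0.10 * torch.randn(4, 16)   # reconstruction from the top-k codes
x_hat_4k = f_x + 0.05 * torch.randn(4, 16)  # reconstruction from the top-4k codes
e = f_x - x_hat_k                           # residual error of the top-k reconstruction
e_hat = e + 0.20 * torch.randn(4, 16)       # auxiliary prediction of that residual

L_k = F.mse_loss(f_x, x_hat_k)
L_4k = F.mse_loss(f_x, x_hat_4k)
L_aux = F.mse_loss(e, e_hat)
L_recon = L_k + L_4k / 8 + beta * L_aux
print(f"L_k={float(L_k):.4f} L_4k={float(L_4k):.4f} L_aux={float(L_aux):.4f} L_recon={float(L_recon):.4f}")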
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
from docarray.typing.proto_register import _register_proto
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
@_register_proto(proto_type_name='id')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
        be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(text=self, type=self._proto_type_name)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
from typing import TYPE_CHECKING, Type, TypeVar, Union
from uuid import UUID
from pydantic import BaseConfig, parse_obj_as
from pydantic.fields import ModelField
if TYPE_CHECKING:
from docarray.proto import NodeProto
from docarray.typing.abstract_type import AbstractType
T = TypeVar('T', bound='ID')
class ID(str, AbstractType):
"""
    Represent a unique ID
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[str, int, UUID],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
try:
id: str = str(value)
return cls(id)
except Exception:
raise ValueError(f'Expected a str, int or UUID, got {type(value)}')
def _to_node_protobuf(self) -> 'NodeProto':
"""Convert an ID into a NodeProto message. This function should
        be called when self is nested into another Document that needs to be
converted into a protobuf
:return: the nested item protobuf message
"""
from docarray.proto import NodeProto
return NodeProto(id=self)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'str') -> T:
"""
        Read an ID from a proto msg
:param pb_msg:
:return: a string
"""
return parse_obj_as(cls, pb_msg)
|
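Both ID records above coerce whatever they receive (str, int, or UUID) to its string form inside validate. A minimal sketch of that coercion with the pydantic and protobuf plumbing stripped out; the helper name is invented for illustration.
from uuid import uuid4

def coerce_id(value):
    # mirrors ID.validate: accept str, int or UUID and keep the string form
    try:
        return str(value)
    except Exception:
        raise ValueError(f'Expected a str, int or UUID, got {type(value)}')

print(coerce_id('doc-1'))  # 'doc-1'
print(coerce_id(42))       # '42'
print(coerce_id(uuid4()))  # e.g. '1b4e28ba-2fa1-11d2-883f-0016d3cca427'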
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer, TrackLocalVisualizer
from .palette import get_palette, jitter_color, palette_val
__all__ = [
'palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color',
'TrackLocalVisualizer'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer
from .palette import get_palette, jitter_color, palette_val
__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer', 'jitter_color']
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import pytest
from executor.torch_encoder import ImageTorchEncoder
from jina import Document, DocumentArray, Executor
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)]),
],
)
def test_preprocessing_reshape_correct(content: np.ndarray, out_shape: Tuple):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert (
reshaped_content.shape == out_shape
), f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r',), pytest.lazy_fixture('docs_with_blobs')),
(('c',), pytest.lazy_fixture('docs_with_chunk_blobs')),
],
)
def test_encode_image_returns_correct_length(
traversal_paths: Tuple[str], docs: DocumentArray
) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.gpu
def test_encode_gpu(docs_with_blobs: DocumentArray) -> None:
encoder = ImageTorchEncoder(default_traversal_path=('r',), device='cuda')
encoder.encode(docs=docs_with_blobs, parameters={})
for doc in docs_with_blobs.traverse_flat(('r',)):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['resnet50', 'mobilenet_v3_large', 'googlenet'])
def test_encodes_semantic_meaning(test_images: Dict[str, np.ndarray], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512,)
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from typing import Dict, Tuple
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor
from ...torch_encoder import ImageTorchEncoder
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
@pytest.mark.parametrize(
['content', 'out_shape'],
[
([np.ones((10, 10, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((360, 420, 3), dtype=np.uint8), (3, 224, 224)]),
([np.ones((300, 300, 3), dtype=np.uint8), (3, 224, 224)]),
],
)
def test_preprocessing_reshape_correct(content: np.ndarray, out_shape: Tuple):
encoder = ImageTorchEncoder()
reshaped_content = encoder._preprocess(content)
assert (
reshaped_content.shape == out_shape
), f'Expected shape {out_shape} but got {reshaped_content.shape}'
@pytest.mark.parametrize(
'traversal_paths, docs',
[
(('r',), pytest.lazy_fixture('docs_with_blobs')),
(('c',), pytest.lazy_fixture('docs_with_chunk_blobs')),
],
)
def test_encode_image_returns_correct_length(
traversal_paths: Tuple[str], docs: DocumentArray
) -> None:
encoder = ImageTorchEncoder(default_traversal_path=traversal_paths)
encoder.encode(docs=docs, parameters={})
for doc in docs.traverse_flat(traversal_paths):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.gpu
def test_encode_gpu(docs_with_blobs: DocumentArray) -> None:
encoder = ImageTorchEncoder(default_traversal_path=('r',), device='cuda')
encoder.encode(docs=docs_with_blobs, parameters={})
for doc in docs_with_blobs.traverse_flat(('r',)):
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.parametrize('model_name', ['resnet50', 'mobilenet_v3_large', 'googlenet'])
def test_encodes_semantic_meaning(test_images: Dict[str, np.ndarray], model_name: str):
encoder = ImageTorchEncoder(model_name=model_name)
embeddings = {}
for name, image_arr in test_images.items():
docs = DocumentArray([Document(blob=image_arr)])
encoder.encode(docs, parameters={})
embeddings[name] = docs[0].embedding
def dist(a, b):
a_embedding = embeddings[a]
b_embedding = embeddings[b]
return np.linalg.norm(a_embedding - b_embedding)
small_distance = dist('banana1', 'banana2')
assert small_distance < dist('banana1', 'airplane')
assert small_distance < dist('banana1', 'satellite')
assert small_distance < dist('banana1', 'studio')
assert small_distance < dist('banana2', 'airplane')
assert small_distance < dist('banana2', 'satellite')
assert small_distance < dist('banana2', 'studio')
assert small_distance < dist('airplane', 'studio')
assert small_distance < dist('airplane', 'satellite')
assert small_distance < dist('studio', 'satellite')
def test_no_preprocessing():
encoder = ImageTorchEncoder(use_default_preprocessing=False)
# without pre-processing the user needs to provide the right shape for the model directly
arr_in = np.ones((3, 224, 224), dtype=np.float32)
docs = DocumentArray([Document(blob=arr_in)])
encoder.encode(docs=docs, parameters={})
assert docs[0].embedding.shape == (512,)
def test_empty_doc_array():
docs = DocumentArray()
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert len(docs) == 0
def test_docs_array_with_no_text():
docs = DocumentArray([Document(text='hello world')])
encoder = ImageTorchEncoder()
encoder.encode(docs, parameters={})
assert docs[0].embedding is None
|
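The semantic-meaning test in both records asserts that the L2 distance between the two 'banana' embeddings is smaller than their distance to unrelated images. A toy illustration of that distance check with made-up vectors; only numpy is assumed.
import numpy as np

emb = {
    'banana1': np.array([1.0, 0.9, 0.1]),
    'banana2': np.array([0.9, 1.0, 0.2]),
    'airplane': np.array([0.0, 0.1, 1.0]),
}

def dist(a, b):
    # same metric as the test above: Euclidean distance between embeddings
    return np.linalg.norm(emb[a] - emb[b])

assert dist('banana1', 'banana2') < dist('banana1', 'airplane')
print(dist('banana1', 'banana2'), dist('banana1', 'airplane'))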
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
# TODO verify after refactoring analyze_results.py
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
'image_dir',
        help='directory where the result images '
        'generated by analyze_results.py are saved')
parser.add_argument(
'--out',
type=str,
default='result.gif',
help='gif path where will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval (s),
Default: 2
"""
if imageio is None:
        raise RuntimeError('imageio is not installed, '
                           'please use "pip install imageio" to install it')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
# bbox_to_anchor may need to be finetuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(description='Create GIF for demo')
parser.add_argument(
'image_dir',
        help='directory where the result images '
        'generated by analyze_results.py are saved')
parser.add_argument(
'--out',
type=str,
default='result.gif',
help='gif path where will be saved')
args = parser.parse_args()
return args
def _generate_batch_data(sampler, batch_size):
batch = []
for idx in sampler:
batch.append(idx)
if len(batch) == batch_size:
yield batch
batch = []
if len(batch) > 0:
yield batch
def create_gif(frames, gif_name, duration=2):
"""Create gif through imageio.
Args:
frames (list[ndarray]): Image frames
gif_name (str): Saved gif name
duration (int): Display interval (s),
Default: 2
"""
if imageio is None:
        raise RuntimeError('imageio is not installed, '
                           'please use "pip install imageio" to install it')
imageio.mimsave(gif_name, frames, 'GIF', duration=duration)
def create_frame_by_matplotlib(image_dir,
nrows=1,
fig_size=(300, 300),
font_size=15):
"""Create gif frame image through matplotlib.
Args:
image_dir (str): Root directory of result images
nrows (int): Number of rows displayed, Default: 1
fig_size (tuple): Figure size of the pyplot figure.
Default: (300, 300)
font_size (int): Font size of texts. Default: 15
Returns:
list[ndarray]: image frames
"""
result_dir_names = os.listdir(image_dir)
assert len(result_dir_names) == 2
# Longer length has higher priority
result_dir_names.reverse()
images_list = []
for dir_names in result_dir_names:
images_list.append(mmcv.scandir(osp.join(image_dir, dir_names)))
frames = []
for paths in _generate_batch_data(zip(*images_list), nrows):
fig, axes = plt.subplots(nrows=nrows, ncols=2)
fig.suptitle('Good/bad case selected according '
'to the COCO mAP of the single image')
det_patch = mpatches.Patch(color='salmon', label='prediction')
gt_patch = mpatches.Patch(color='royalblue', label='ground truth')
# bbox_to_anchor may need to be finetuned
plt.legend(
handles=[det_patch, gt_patch],
bbox_to_anchor=(1, -0.18),
loc='lower right',
borderaxespad=0.)
if nrows == 1:
axes = [axes]
dpi = fig.get_dpi()
# set fig size and margin
fig.set_size_inches(
(fig_size[0] * 2 + fig_size[0] // 20) / dpi,
(fig_size[1] * nrows + fig_size[1] // 3) / dpi,
)
fig.tight_layout()
# set subplot margin
plt.subplots_adjust(
hspace=.05,
wspace=0.05,
left=0.02,
right=0.98,
bottom=0.02,
top=0.98)
for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)):
image_path_left = osp.join(
osp.join(image_dir, result_dir_names[0], path_tuple[0]))
image_path_right = osp.join(
osp.join(image_dir, result_dir_names[1], path_tuple[1]))
image_left = mmcv.imread(image_path_left)
image_left = mmcv.rgb2bgr(image_left)
image_right = mmcv.imread(image_path_right)
image_right = mmcv.rgb2bgr(image_right)
if i == 0:
ax_tuple[0].set_title(
result_dir_names[0], fontdict={'size': font_size})
ax_tuple[1].set_title(
result_dir_names[1], fontdict={'size': font_size})
ax_tuple[0].imshow(
image_left, extent=(0, *fig_size, 0), interpolation='bilinear')
ax_tuple[0].axis('off')
ax_tuple[1].imshow(
image_right,
extent=(0, *fig_size, 0),
interpolation='bilinear')
ax_tuple[1].axis('off')
canvas = fig.canvas
s, (width, height) = canvas.print_to_buffer()
buffer = np.frombuffer(s, dtype='uint8')
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
img = rgb.astype('uint8')
frames.append(img)
return frames
def main():
args = parse_args()
frames = create_frame_by_matplotlib(args.image_dir)
create_gif(frames, args.out)
if __name__ == '__main__':
main()
|
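The helper _generate_batch_data in both scripts above groups an iterable into fixed-size batches and flushes the final partial batch. A quick standalone check of that behaviour; the function is re-declared here so the snippet runs on its own.
def generate_batch_data(sampler, batch_size):
    # same batching logic as _generate_batch_data above
    batch = []
    for idx in sampler:
        batch.append(idx)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if len(batch) > 0:
        yield batch

print(list(generate_batch_data(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]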
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './mask2former_r50_lsj_8x2_50e_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import rms_normalization
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import polar
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import sparse_plus
from keras.src.ops.nn import sparsemax
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
from keras.src.ops.nn import threshold
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_non_mockfs():
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
mock_bucket = "mock-s3-bucket"
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
is_remote = is_remote_filesystem(mockfs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = input_paths[compression_fs_class.protocol]
if input_path is None:
reason = f"for '{compression_fs_class.protocol}' compression protocol, "
if compression_fs_class.protocol == "lz4":
reason += require_lz4.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(reason)
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.glob("*") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
compressed_file_path = compressed_file_paths[protocol]
member_file_path = "dataset.jsonl"
path = f"{protocol}://{member_file_path}::{compressed_file_path}"
fs, *_ = fsspec.get_fs_token_paths(path)
assert fs.isfile(member_file_path)
assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
assert hffs.isdir("data")
assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
with open(text_file) as f:
assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
protocol = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol, None, clobber=True)
with pytest.warns(UserWarning) as warning_info:
importlib.reload(datasets.filesystems)
assert len(warning_info) == 1
assert (
str(warning_info[0].message)
== f"A filesystem protocol was already set for {protocol} and will be overwritten."
)
|
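test_fs_isfile in both records resolves a member file through an fsspec chained URL of the form protocol://member::outer_path. A small sketch of that pattern, assuming only fsspec, a writable working directory, and that the relative outer path is resolved from it; the file names are placeholders.
import gzip
import fsspec

# create a tiny gzip-compressed member to look up (placeholder file name)
with gzip.open('dataset.jsonl.gz', 'wt', encoding='utf-8') as f:
    f.write('{"text": "hello"}\n')

# chained URL: the gzip filesystem exposes the decompressed member 'dataset.jsonl'
fs, *_ = fsspec.get_fs_token_paths('gzip://dataset.jsonl::dataset.jsonl.gz')
print(fs.isfile('dataset.jsonl'))               # expected True
print(fs.isfile('non_existing_dataset.jsonl'))  # expected False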
import os
import shutil
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
os.system('scripts/download_full.sh')
yield
shutil.rmtree('.cache', ignore_errors=True)
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
import os
import shutil
import pytest
@pytest.fixture(scope="session", autouse=True)
def download_cache():
os.system('scripts/download_full.sh')
yield
shutil.rmtree('.cache', ignore_errors=True)
|
import pytest
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = Video(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
|
import pytest
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = Video(url=file_url)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example to use different file client
# Method 1: simply set the data root and let the file I/O module
# automatically infer from prefix (not support LMDB and Memcache yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, remove this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False,
backend_args=backend_args)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
    # If you don't have gt annotations, remove this pipeline step
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
# inference on test dataset and
# format the output results for submission.
# test_dataloader = dict(
# batch_size=1,
# num_workers=2,
# persistent_workers=True,
# drop_last=False,
# sampler=dict(type='DefaultSampler', shuffle=False),
# dataset=dict(
# type=dataset_type,
# data_root=data_root,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# data_prefix=dict(img='test2017/'),
# test_mode=True,
# pipeline=test_pipeline))
# test_evaluator = dict(
# type='CocoMetric',
# metric='bbox',
# format_only=True,
# ann_file=data_root + 'annotations/image_info_test-dev2017.json',
# outfile_prefix='./work_dirs/coco_detection/test')
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras class with the required model and API key
llm = Cerebras(model="llama-3.3-70b", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Why is fast inference important?")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = os.environ.get("CEREBRAS_BASE_URL", None)
or "https://api.cerebras.ai/v1/",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("CEREBRAS_API_KEY", None)
assert (
api_key is not None
), "API Key not specified! Please set `CEREBRAS_API_KEY`!"
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Cerebras"
|
import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
class Cerebras(OpenAILike):
"""
Cerebras LLM.
Examples:
`pip install llama-index-llms-cerebras`
```python
from llama_index.llms.cerebras import Cerebras
# Set up the Cerebras class with the required model and API key
llm = Cerebras(model="llama-3.3-70b", api_key="your_api_key")
# Call the complete method with a query
response = llm.complete("Why is fast inference important?")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: str = os.environ.get("CEREBRAS_BASE_URL", None)
or "https://api.cerebras.ai/v1/",
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("CEREBRAS_API_KEY", None)
assert (
api_key is not None
), "API Key not specified! Please set `CEREBRAS_API_KEY`!"
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Cerebras"
|
from ._dsp import adsr_envelope, extend_pitch, oscillator_bank
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"extend_pitch",
"fftconvolve",
"oscillator_bank",
]
|
from ._dsp import adsr_envelope, oscillator_bank
from .functional import add_noise, barkscale_fbanks, convolve, fftconvolve
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"fftconvolve",
"oscillator_bank",
]
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseRerankingEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation").select(range(1000))
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using only the documents from the 1000 samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
# Print the results
print(f"Primary metric: {reranking_evaluator.primary_metric}")
print(f"Primary metric value: {results[reranking_evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseRerankingEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load a dataset with queries, positives, and negatives
eval_dataset = load_dataset("microsoft/ms_marco", "v1.1", split="validation")
samples = [
{
"query": sample["query"],
"positive": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if is_selected
],
"negative": [
text
for is_selected, text in zip(sample["passages"]["is_selected"], sample["passages"]["passage_text"])
if not is_selected
],
}
for sample in eval_dataset
]
# Now evaluate using the documents from these validation samples
reranking_evaluator = SparseRerankingEvaluator(
samples=samples,
name="ms-marco-dev-small",
show_progress_bar=True,
batch_size=32,
)
results = reranking_evaluator(model)
print(results)
print(reranking_evaluator.primary_metric)
print(results[reranking_evaluator.primary_metric])
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
from mmengine.config import Config
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
                                    'record_metric_key does not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
                    print(f'{config}: no json file found in {metric_json_dir}')
else:
                print(f'{config}: result directory {metric_json_dir} does not exist')
if metrics_out:
mkdir_or_exist(metrics_out)
dump(result_dict, osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Gather benchmarked models metric')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'root',
type=str,
help='root path of benchmarked models to be gathered')
parser.add_argument(
'--out', type=str, help='output path of gathered metrics to be stored')
parser.add_argument(
'--not-show', action='store_true', help='not show metrics')
parser.add_argument(
'--show-all', action='store_true', help='show all model metrics')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
root_path = args.root
metrics_out = args.out
result_dict = {}
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
record_metrics = model_info['metric']
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
metric_json_dir = osp.join(root_path, fname)
if osp.exists(metric_json_dir):
json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
if len(json_list) > 0:
log_json_path = list(sorted(json_list))[-1]
metric = mmcv.load(log_json_path)
if config in metric.get('config', {}):
new_metrics = dict()
for record_metric_key in record_metrics:
record_metric_key_bk = record_metric_key
old_metric = record_metrics[record_metric_key]
if record_metric_key == 'AR_1000':
record_metric_key = 'AR@1000'
if record_metric_key not in metric['metric']:
raise KeyError(
                                    'record_metric_key does not exist, please '
'check your config')
new_metric = round(
metric['metric'][record_metric_key] * 100, 1)
new_metrics[record_metric_key_bk] = new_metric
if args.show_all:
result_dict[config] = dict(
before=record_metrics, after=new_metrics)
else:
for record_metric_key in record_metrics:
old_metric = record_metrics[record_metric_key]
new_metric = new_metrics[record_metric_key]
if old_metric != new_metric:
result_dict[config] = dict(
before=record_metrics,
after=new_metrics)
break
else:
print(f'{config} not included in: {log_json_path}')
else:
                    print(f'{config}: no json file found in {metric_json_dir}')
else:
                print(f'{config}: result directory {metric_json_dir} does not exist')
if metrics_out:
mmcv.mkdir_or_exist(metrics_out)
mmcv.dump(result_dict,
osp.join(metrics_out, 'batch_test_metric_info.json'))
if not args.not_show:
print('===================================')
for config_name, metrics in result_dict.items():
print(config_name, metrics)
print('===================================')
|
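The gather script in both records iterates a Config whose top-level entries each carry a 'config' path and a 'metric' dict, optionally wrapped in a list. A hypothetical example of that layout; the model names, paths, and numbers are purely illustrative.
# hypothetical benchmark entries as consumed by the gathering loop above
retinanet = dict(
    config='configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    metric=dict(bbox_mAP=36.5),
)
mask_rcnn = [
    dict(
        config='configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
        metric=dict(bbox_mAP=38.2, segm_mAP=34.7),
    ),
]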
import os
import pytest
import requests
from jina import Flow
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayGetStreamer
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-custom-gateway.yml'
)
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-fastapi-gateway.yml'
)
_flow_with_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-flow-custom-gateway-nested-config.yml'
)
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
(DummyGateway, {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
DummyGatewayGetStreamer,
{},
{'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
DummyGateway,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGateway,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_flow_custom_gateway_no_executor(uses, uses_with, expected):
flow = (
Flow().config_gateway(uses=uses, uses_with=uses_with).add(uses=ProcessExecutor)
)
with flow:
_validate_dummy_custom_gateway_response(flow.port, expected)
_validate_custom_gateway_process(
flow.port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
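# A minimal sketch in plain Python (not Jina's actual implementation) of how the
# expected values in the parametrization above can be derived: gateway class
# defaults are overlaid by the YAML `with` section (when `uses` is a YAML path),
# which in turn is overlaid by the runtime `uses_with` overrides. The YAML values
# below are assumed from the expectations in the table.
def _expected_gateway_args(class_defaults, yaml_with, uses_with):
    # later dicts win on key collisions
    return {**class_defaults, **yaml_with, **uses_with}
assert _expected_gateway_args(
    {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},  # DummyGateway defaults
    {'arg1': 'hello', 'arg2': 'world'},  # assumed from test-custom-gateway.yml
    {'arg1': 'arg1'},  # runtime override
) == {'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'}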
def test_flow_fastapi_default_health_check():
flow = (
Flow()
.config_gateway(
uses=_dummy_fastapi_gateway_yaml_path,
uses_with={'default_health_check': True},
)
.add(uses='ProcessExecutor')
)
with flow:
_validate_dummy_custom_gateway_response(flow.port, {})
_validate_custom_gateway_process(
flow.port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
def test_flow_custom_gateway_nested_config():
flow = Flow.load_config(_flow_with_dummy_gateway_yaml_path)
with flow:
_validate_dummy_custom_gateway_response(
flow.port, {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'}
)
def test_flow_custom_gateway_via_flow_uses_disabled():
uses_with = {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'}
flow = Flow(uses='DummyGateway', uses_with=uses_with)
    # the uses parameter is ignored here and is not applied to the gateway; therefore, the
    # gateway is just the default gRPC gateway
with pytest.raises(requests.ConnectionError):
with flow:
_ = requests.get(f'http://127.0.0.1:{flow.port}/').json()
|
import os
import pytest
import requests
from jina import Flow
from tests.helper import (
ProcessExecutor,
_validate_custom_gateway_process,
_validate_dummy_custom_gateway_response,
)
from tests.unit.yaml.dummy_gateway import DummyGateway
from tests.unit.yaml.dummy_gateway_get_streamer import DummyGatewayGetStreamer
cur_dir = os.path.dirname(os.path.abspath(__file__))
_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-custom-gateway.yml'
)
_dummy_fastapi_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-fastapi-gateway.yml'
)
_flow_with_dummy_gateway_yaml_path = os.path.join(
cur_dir, '../../../yaml/test-flow-custom-gateway-nested-config.yml'
)
@pytest.mark.parametrize(
'uses,uses_with,expected',
[
(DummyGateway, {}, {'arg1': None, 'arg2': None, 'arg3': 'default-arg3'}),
(
DummyGatewayGetStreamer,
{},
{'arg1': None, 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{},
{'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
DummyGateway,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
{'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'},
),
(
DummyGateway,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
DummyGatewayGetStreamer,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': None, 'arg3': 'default-arg3'},
),
(
_dummy_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
(
_dummy_fastapi_gateway_yaml_path,
{'arg1': 'arg1'},
{'arg1': 'arg1', 'arg2': 'world', 'arg3': 'default-arg3'},
),
],
)
def test_flow_custom_gateway_no_executor(uses, uses_with, expected):
flow = (
Flow().config_gateway(uses=uses, uses_with=uses_with).add(uses=ProcessExecutor)
)
with flow:
_validate_dummy_custom_gateway_response(flow.port, expected)
_validate_custom_gateway_process(
flow.port, 'hello', {'text': 'helloworld', 'tags': {'processed': True}}
)
def test_flow_custom_gateway_nested_config():
flow = Flow.load_config(_flow_with_dummy_gateway_yaml_path)
with flow:
_validate_dummy_custom_gateway_response(
flow.port, {'arg1': 'hello', 'arg2': 'world', 'arg3': 'default-arg3'}
)
def test_flow_custom_gateway_via_flow_uses_disabled():
uses_with = {'arg1': 'arg1', 'arg2': 'arg2', 'arg3': 'arg3'}
flow = Flow(uses='DummyGateway', uses_with=uses_with)
    # the uses parameter is ignored here and is not applied to the gateway; therefore, the
    # gateway is just the default gRPC gateway
with pytest.raises(requests.ConnectionError):
with flow:
_ = requests.get(f'http://127.0.0.1:{flow.port}/').json()
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionUpscalePipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
backend_empty_cache,
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_accelerator,
slow,
torch_device,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_accelerator
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionUpscalePipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
repo_id = "stabilityai/stable-diffusion-x4-upscaler"
def setUp(self):
super().setUp()
gc.collect()
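        # free cached accelerator memory in a device-agnostic way
        # (instead of the CUDA-only torch.cuda.empty_cache())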
backend_empty_cache(torch_device)
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
def test_single_file_format_inference_is_same_as_pretrained(self):
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png"
)
prompt = "a cat sitting on a park bench"
pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
pipe.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
image_from_pretrained = output.images[0]
pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
pipe_from_single_file.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output_from_single_file = pipe_from_single_file(
prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
)
image_from_single_file = output_from_single_file.images[0]
assert image_from_pretrained.shape == (512, 512, 3)
assert image_from_single_file.shape == (512, 512, 3)
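        # the single-file checkpoint and the Hub repo should produce numerically
        # near-identical images; compare flattened pixels via cosine distance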
assert (
numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3
)
|
import gc
import unittest
import torch
from diffusers import (
StableDiffusionUpscalePipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
)
from .single_file_testing_utils import SDSingleFileTesterMixin
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin):
pipeline_class = StableDiffusionUpscalePipeline
ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors"
original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml"
repo_id = "stabilityai/stable-diffusion-x4-upscaler"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_single_file_format_inference_is_same_as_pretrained(self):
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png"
)
prompt = "a cat sitting on a park bench"
pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id)
pipe.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3)
image_from_pretrained = output.images[0]
pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path)
pipe_from_single_file.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
output_from_single_file = pipe_from_single_file(
prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3
)
image_from_single_file = output_from_single_file.images[0]
assert image_from_pretrained.shape == (512, 512, 3)
assert image_from_single_file.shape == (512, 512, 3)
assert (
numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.layers.attention.attention import disable_flash_attention
from keras.src.layers.attention.attention import enable_flash_attention
from keras.src.layers.attention.attention import is_flash_attention_enabled
from keras.src.saving.serialization_lib import enable_unsafe_deserialization
from keras.src.utils.backend_utils import set_backend
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.traceback_utils import disable_traceback_filtering
from keras.src.utils.traceback_utils import enable_traceback_filtering
from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.saving.serialization_lib import enable_unsafe_deserialization
from keras.src.utils.backend_utils import set_backend
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.traceback_utils import disable_traceback_filtering
from keras.src.utils.traceback_utils import enable_traceback_filtering
from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
|