input | output
---|---
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Load `Markdown` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Setup:
Install ``langchain-community``.
.. code-block:: bash
pip install -U langchain-community
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import UnstructuredMarkdownLoader
loader = UnstructuredMarkdownLoader(
"./example_data/example.md",
mode="elements",
strategy="fast",
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
References
----------
https://unstructured-io.github.io/unstructured/core/partition.html#partition-md
""" # noqa: E501
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the Markdown file to load.
mode: The mode to use when loading the file. Can be one of "single"
or "elements". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version("0.4.16")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.md import partition_md
return partition_md(filename=self.file_path, **self.unstructured_kwargs)
|
from pathlib import Path
from typing import Any, List, Union
from langchain_community.document_loaders.unstructured import (
UnstructuredFileLoader,
validate_unstructured_version,
)
class UnstructuredMarkdownLoader(UnstructuredFileLoader):
"""Load `Markdown` files using `Unstructured`.
You can run the loader in one of two modes: "single" and "elements".
If you use "single" mode, the document will be returned as a single
langchain Document object. If you use "elements" mode, the unstructured
library will split the document into elements such as Title and NarrativeText.
You can pass in additional unstructured kwargs after mode to apply
different unstructured settings.
Setup:
Install ``langchain-community``.
.. code-block:: bash
pip install -U langchain-community
Instantiate:
.. code-block:: python
from langchain_community.document_loaders import UnstructuredMarkdownLoader
loader = UnstructuredMarkdownLoader(
"./example_data/example.md",
mode="elements",
strategy="fast",
)
Lazy load:
.. code-block:: python
docs = []
docs_lazy = loader.lazy_load()
# async variant:
# docs_lazy = await loader.alazy_load()
for doc in docs_lazy:
docs.append(doc)
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
Async load:
.. code-block:: python
docs = await loader.aload()
print(docs[0].page_content[:100])
print(docs[0].metadata)
.. code-block:: python
Sample Markdown Document
{'source': './example_data/example.md', 'category_depth': 0, 'last_modified': '2024-08-14T15:04:18', 'languages': ['eng'], 'filetype': 'text/markdown', 'file_directory': './example_data', 'filename': 'example.md', 'category': 'Title', 'element_id': '3d0b313864598e704aa26c728ecb61e5'}
References
----------
https://unstructured-io.github.io/unstructured/core/partition.html#partition-md
""" # noqa: E501
def __init__(
self,
file_path: Union[str, Path],
mode: str = "single",
**unstructured_kwargs: Any,
):
"""
Args:
file_path: The path to the Markdown file to load.
mode: The mode to use when loading the file. Can be one of "single"
or "elements". Default is "single".
**unstructured_kwargs: Any kwargs to pass to the unstructured library.
"""
file_path = str(file_path)
validate_unstructured_version("0.4.16")
super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
def _get_elements(self) -> List:
from unstructured.partition.md import partition_md
return partition_md(filename=self.file_path, **self.unstructured_kwargs) # type: ignore[arg-type]
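# A minimal synchronous usage sketch, guarded so it only runs when this module
# is executed directly (the example path is illustrative, not shipped here):
if __name__ == "__main__":
    loader = UnstructuredMarkdownLoader("./example_data/example.md")  # "single" mode by default
    docs = loader.load()
    print(len(docs), docs[0].metadata["source"])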
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
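# A hypothetical test showing how the fixtures above compose (illustrative
# only; not part of the original conftest):
def test_docs_with_text_fixture(docs_with_text: DocumentArray):
    assert len(docs_with_text) == 10
    assert all(doc.text == 'hello world' for doc in docs_with_text)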
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
@pytest.fixture()
def docs_with_text() -> DocumentArray:
return DocumentArray([Document(text='hello world') for _ in range(10)])
@pytest.fixture()
def docs_with_chunk_text() -> DocumentArray:
return DocumentArray(
[Document(chunks=[Document(text='hello world') for _ in range(10)])]
)
@pytest.fixture()
def docs_with_chunk_chunk_text() -> DocumentArray:
return DocumentArray(
[
Document(
chunks=[
Document(chunks=[Document(text='hello world') for _ in range(10)])
]
)
]
)
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
from typing import Any, Dict, List, Optional, Union
from huggingface_hub import HfFileSystem
from . import config
from .table import CastError
from .utils.track import TrackedIterable, tracked_list, tracked_str
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library."""
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found."""
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
"""
class DatasetBuildError(DatasetsError):
pass
class ManualDownloadError(DatasetBuildError):
pass
class FileFormatError(DatasetBuildError):
pass
class DatasetGenerationError(DatasetBuildError):
pass
class DatasetGenerationCastError(DatasetGenerationError):
@classmethod
def from_cast_error(
cls,
cast_error: CastError,
builder_name: str,
gen_kwargs: Dict[str, Any],
token: Optional[Union[bool, str]],
) -> "DatasetGenerationCastError":
explanation_message = (
f"\n\nAll the data files must have the same columns, but at some point {cast_error.details()}"
)
formatted_tracked_gen_kwargs: List[str] = []
for gen_kwarg in gen_kwargs.values():
if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterable)):
continue
while isinstance(gen_kwarg, (tracked_list, TrackedIterable)) and gen_kwarg.last_item is not None:
gen_kwarg = gen_kwarg.last_item
if isinstance(gen_kwarg, tracked_str):
gen_kwarg = gen_kwarg.get_origin()
if isinstance(gen_kwarg, str) and gen_kwarg.startswith("hf://"):
resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
gen_kwarg = "hf://" + resolved_path.unresolve()
if "@" + resolved_path.revision in gen_kwarg:
gen_kwarg = (
gen_kwarg.replace("@" + resolved_path.revision, "", 1)
+ f" (at revision {resolved_path.revision})"
)
formatted_tracked_gen_kwargs.append(str(gen_kwarg))
if formatted_tracked_gen_kwargs:
explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
help_message = "\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)"
return cls("An error occurred while generating the dataset" + explanation_message + help_message)
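# A minimal sketch of how DatasetGenerationCastError typically surfaces to
# users of the library (hypothetical dataset id; needs network access, so guarded):
if __name__ == "__main__":
    from datasets import load_dataset
    try:
        load_dataset("user/dataset-with-mismatched-columns")
    except DatasetGenerationCastError as err:
        # The message names the mismatched columns and the file being generated.
        print(err)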
|
# SPDX-License-Identifier: Apache-2.0
# Copyright 2023 The HuggingFace Authors.
class DatasetsError(Exception):
"""Base class for exceptions in this library."""
class DefunctDatasetError(DatasetsError):
"""The dataset has been defunct."""
class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
"""FileNotFoundError raised by this library."""
class DataFilesNotFoundError(FileNotFoundDatasetsError):
"""No (supported) data files found."""
class DatasetNotFoundError(FileNotFoundDatasetsError):
"""Dataset not found.
Raised when trying to access:
- a missing dataset, or
- a private/gated dataset and the user is not authenticated.
"""
|
import os
import time
import pytest
from jina import Deployment, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(
os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8'
) as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Deployment(protocol='http',
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}, include_gateway=False,
):
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
import os
import time
import pytest
from jina import Flow, Executor
class SlowExecutor(Executor):
def close(self) -> None:
with open(
os.path.join(self.metas.workspace, 'test'), 'w', encoding='utf-8'
) as f:
time.sleep(10)
f.write('x')
@pytest.mark.slow
def test_slow_executor_close(tmpdir):
with Flow().add(
uses={'jtype': 'SlowExecutor', 'with': {}, 'metas': {'workspace': str(tmpdir)}}
) as f:
pass
assert os.path.exists(os.path.join(tmpdir, 'test'))
|
"""Module for helper functions for clients."""
from typing import Tuple
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
_kwargs, batch, data_type, endpoint, target, parameters
):
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type, _kwargs)
return req
def _new_data_request(endpoint, target, parameters):
req = DataRequest()
# set up header
if endpoint:
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType, **kwargs
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data, **kwargs), DataInputType.CONTENT
if data_type == DataInputType.DICT:
doc = Document.from_dict(data)
return doc, DataInputType.DICT
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if the incoming data is already a Document, pass it through unchanged
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return Document.from_dict(data), DataInputType.DICT
try:
d = Document(data, **kwargs)
return d, DataInputType.DOCUMENT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req, batch, data_type, _kwargs):
da = DocumentArray()
for content in batch:
if isinstance(content, tuple) and len(content) == 2:
d, data_type = _new_doc_from_data(content[0], data_type, **_kwargs)
da.append(d)
else:
d, data_type = _new_doc_from_data(content, data_type, **_kwargs)
da.append(d)
req.data.docs = da
|
"""Module for helper functions for clients."""
from typing import Tuple
from docarray import Document, DocumentArray
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
def _new_data_request_from_batch(
_kwargs, batch, data_type, endpoint, target, parameters
):
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type, _kwargs)
return req
def _new_data_request(endpoint, target, parameters):
req = DataRequest()
# set up header
if endpoint:
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType, **kwargs
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data, **kwargs), DataInputType.CONTENT
if data_type == DataInputType.DICT:
doc = Document.from_dict(data)
return doc, DataInputType.DICT
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if the incoming data is already a Document, pass it through unchanged
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return Document.from_dict(data), DataInputType.DICT
try:
d = Document(data, **kwargs)
return d, DataInputType.DOCUMENT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req, batch, data_type, _kwargs):
da = DocumentArray()
for content in batch:
if isinstance(content, tuple) and len(content) == 2:
d, data_type = _new_doc_from_data(content[0], data_type, **_kwargs)
gt, _ = _new_doc_from_data(content[1], data_type, **_kwargs)
da.append(d)
else:
d, data_type = _new_doc_from_data(content, data_type, **_kwargs)
da.append(d)
req.data.docs = da
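# A minimal sketch of the request-building flow above (endpoint and parameter
# values are illustrative):
if __name__ == "__main__":
    batch = [Document(text='hello'), Document(text='world')]
    req = _new_data_request_from_batch(
        _kwargs={},
        batch=batch,
        data_type=DataInputType.AUTO,
        endpoint='/index',
        target=None,
        parameters={'top_k': 5},
    )
    print(len(req.data.docs), req.header.exec_endpoint)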
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
import os
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import Variable
from keras.api import __version__
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
# END DO NOT EDIT.
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
import os
# DO NOT EDIT. Generated by api_gen.sh
from keras.api import DTypePolicy
from keras.api import FloatDTypePolicy
from keras.api import Function
from keras.api import Initializer
from keras.api import Input
from keras.api import InputSpec
from keras.api import KerasTensor
from keras.api import Layer
from keras.api import Loss
from keras.api import Metric
from keras.api import Model
from keras.api import Operation
from keras.api import Optimizer
from keras.api import Quantizer
from keras.api import Regularizer
from keras.api import Sequential
from keras.api import StatelessScope
from keras.api import Variable
from keras.api import __version__
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import device
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import name_scope
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import version
# END DO NOT EDIT.
# Add everything in /api/ to the module search path.
__path__.append(os.path.join(os.path.dirname(__file__), "api")) # noqa: F405
# Don't pollute namespace.
del os
# Never autocomplete `.src` or `.api` on an imported keras object.
def __dir__():
keys = dict.fromkeys((globals().keys()))
keys.pop("src")
keys.pop("api")
return list(keys)
# Don't import `.src` or `.api` during `from keras import *`.
__all__ = [
name
for name in globals().keys()
if not (name.startswith("_") or name in ("src", "api"))
]
|
import logging
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import ExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: List[ExecutionResult]) -> List[Dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
node_input: dict[Any, Any],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id, node_input, user_id=api_key.user_id
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = e.__str__().encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Any, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Depends, HTTPException
from prisma.enums import APIKeyPermission
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
node_input: dict[Any, Any],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id, node_input, user_id=api_key.user_id
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = e.__str__().encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> dict:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_execution_results(graph_exec_id)
return {
"execution_id": graph_exec_id,
"nodes": [
{
"node_id": result.node_id,
"input": (
result.input_data.get("value")
if "value" in result.input_data
else result.input_data
),
"output": result.output_data.get(
"response", result.output_data.get("result", [])
),
}
for result in results
],
}
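# A hypothetical client-side sketch of the execute-then-fetch flow exposed by
# this router. The base URL and the API-key header name are assumptions, not
# defined in this file:
if __name__ == "__main__":
    import requests
    base = "http://localhost:8006/external-api/v1"
    headers = {"X-API-Key": "your-api-key"}
    run = requests.post(f"{base}/graphs/GRAPH_ID/execute", json={}, headers=headers).json()
    result = requests.get(
        f"{base}/graphs/GRAPH_ID/executions/{run['id']}/results", headers=headers
    ).json()
    print(result["execution_id"], [node["node_id"] for node in result["nodes"]])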
|
_base_ = [
'../_base_/models/rpn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
val_evaluator = dict(metric='proposal_fast')
test_evaluator = val_evaluator
|
_base_ = [
'../_base_/models/rpn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# dataset settings
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_label=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
evaluation = dict(interval=1, metric='proposal_fast')
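# Usage sketch: configs like this are consumed by mmdetection's launcher
# scripts, e.g. (the config filename is illustrative):
#   python tools/train.py configs/rpn/rpn_r50_caffe_c4_1x_coco.py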
|
"""Test chat model integration using standard integration tests."""
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def has_tool_choice(self) -> bool:
return False
|
"""Test chat model integration using standard integration tests."""
from typing import Type
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_ollama.chat_models import ChatOllama
class TestChatOllama(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": "llama3.1"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def has_tool_choice(self) -> bool:
return False
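# Running these tests assumes a local Ollama server with the model already
# pulled (commands shown for illustration; the test path is hypothetical):
#   ollama pull llama3.1
#   pytest tests/integration_tests/test_chat_models.py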
|
import logging
import bleach
from bleach.css_sanitizer import CSSSanitizer
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment
from markupsafe import Markup
logger = logging.getLogger(__name__)
def format_filter_for_jinja2(value, format_string=None):
if format_string:
return format_string % float(value)
return value
class TextFormatter:
def __init__(self):
self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
self.env.globals.clear()
# Instead of clearing all filters, just remove potentially unsafe ones
unsafe_filters = ["pprint", "urlize", "xmlattr", "tojson"]
for f in unsafe_filters:
if f in self.env.filters:
del self.env.filters[f]
self.env.filters["format"] = format_filter_for_jinja2
# Define allowed CSS properties
allowed_css_properties = [
"font-family",
"color",
"font-size",
"line-height",
"margin-top",
"margin-bottom",
"margin-left",
"margin-right",
"background-color",
"padding",
"border-radius",
"font-weight",
"text-align",
]
self.css_sanitizer = CSSSanitizer(allowed_css_properties=allowed_css_properties)
self.allowed_tags = [
"p",
"b",
"i",
"u",
"ul",
"li",
"br",
"strong",
"em",
"div",
"span",
]
self.allowed_attributes = {"*": ["style", "class"]}
def format_string(self, template_str: str, values=None, **kwargs) -> str:
"""Regular template rendering with escaping"""
template = self.env.from_string(template_str)
return template.render(values or {}, **kwargs)
def format_email(
self,
subject_template: str,
base_template: str,
content_template: str,
data=None,
**kwargs,
) -> tuple[str, str]:
"""
Special handling for email templates where content needs to be rendered as HTML
"""
# First render the content template
content = self.format_string(content_template, data, **kwargs)
# Clean the HTML + CSS but don't escape it
clean_content = bleach.clean(
content,
tags=self.allowed_tags,
attributes=self.allowed_attributes,
css_sanitizer=self.css_sanitizer,
strip=True,
)
# Mark the cleaned HTML as safe using Markup
safe_content = Markup(clean_content)
# Render subject
rendered_subject_template = self.format_string(subject_template, data, **kwargs)
# Create new env just for HTML template
html_env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
html_env.filters["safe"] = lambda x: (
x if isinstance(x, Markup) else Markup(str(x))
)
# Render base template with the safe content
template = html_env.from_string(base_template)
rendered_base_template = template.render(
data={
"message": safe_content,
"title": rendered_subject_template,
"unsubscribe_link": kwargs.get("unsubscribe_link", ""),
}
)
return rendered_subject_template, rendered_base_template
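# A minimal usage sketch (all template strings are illustrative):
if __name__ == "__main__":
    formatter = TextFormatter()
    subject, html = formatter.format_email(
        subject_template="Welcome, {{ name }}!",
        base_template=(
            "<html><body><h1>{{ data.title }}</h1>"
            "{{ data.message | safe }}</body></html>"
        ),
        content_template="<p>Hi <b>{{ name }}</b>, thanks for joining.</p>",
        data={"name": "Ada"},
    )
    print(subject)  # rendered subject line
    print(html)     # sanitized content embedded in the base template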
|
import logging
import bleach
from jinja2 import BaseLoader
from jinja2.sandbox import SandboxedEnvironment
from markupsafe import Markup
logger = logging.getLogger(__name__)
class TextFormatter:
def __init__(self):
self.env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
self.env.filters.clear()
self.env.tests.clear()
self.env.globals.clear()
self.allowed_tags = ["p", "b", "i", "u", "ul", "li", "br", "strong", "em"]
self.allowed_attributes = {"*": ["style", "class"]}
def format_string(self, template_str: str, values=None, **kwargs) -> str:
"""Regular template rendering with escaping"""
template = self.env.from_string(template_str)
return template.render(values or {}, **kwargs)
def format_email(
self,
subject_template: str,
base_template: str,
content_template: str,
data=None,
**kwargs,
) -> tuple[str, str]:
"""
Special handling for email templates where content needs to be rendered as HTML
"""
# First render the content template
content = self.format_string(content_template, data, **kwargs)
# Clean the HTML but don't escape it
clean_content = bleach.clean(
content,
tags=self.allowed_tags,
attributes=self.allowed_attributes,
strip=True,
)
# Mark the cleaned HTML as safe using Markup
safe_content = Markup(clean_content)
rendered_subject_template = self.format_string(subject_template, data, **kwargs)
# Create new env just for HTML template
html_env = SandboxedEnvironment(loader=BaseLoader(), autoescape=True)
html_env.filters["safe"] = lambda x: (
x if isinstance(x, Markup) else Markup(str(x))
)
# Render base template with the safe content
template = html_env.from_string(base_template)
rendered_base_template = template.render(
data={
"message": safe_content,
"title": rendered_subject_template,
"unsubscribe_link": kwargs.get("unsubscribe_link", ""),
}
)
return rendered_subject_template, rendered_base_template
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.vertexai import (
create_retry_decorator,
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_retry_decorator": "langchain_community.utilities.vertexai",
"raise_vertex_import_error": "langchain_community.utilities.vertexai",
"init_vertexai": "langchain_community.utilities.vertexai",
"get_client_info": "langchain_community.utilities.vertexai",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_retry_decorator",
"get_client_info",
"init_vertexai",
"raise_vertex_import_error",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities.vertexai import (
create_retry_decorator,
get_client_info,
init_vertexai,
raise_vertex_import_error,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_retry_decorator": "langchain_community.utilities.vertexai",
"raise_vertex_import_error": "langchain_community.utilities.vertexai",
"init_vertexai": "langchain_community.utilities.vertexai",
"get_client_info": "langchain_community.utilities.vertexai",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_retry_decorator",
"raise_vertex_import_error",
"init_vertexai",
"get_client_info",
]
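# A minimal sketch of the dynamic lookup in action (requires
# langchain-community to be installed; deprecation warnings are handled
# inside create_importer):
if __name__ == "__main__":
    fn = __getattr__("init_vertexai")  # resolved via DEPRECATED_LOOKUP
    print(fn.__module__)  # -> langchain_community.utilities.vertexai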
|
"""Tools for interacting with an Apache Cassandra database."""
from typing import List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.cassandra.cassandra_database_wrapper import (
CassandraDatabase,
)
class CassandraDatabaseToolSpec(BaseToolSpec):
"""Base tool for interacting with an Apache Cassandra database."""
db: CassandraDatabase = Field(exclude=True)
spec_functions = [
"cassandra_db_query",
"cassandra_db_schema",
"cassandra_db_select_table_data",
]
def __init__(self, db: CassandraDatabase) -> None:
"""DB session in context."""
self.db = db
def cassandra_db_query(self, query: str) -> List[Document]:
"""
Execute a CQL query and return the results as a list of Documents.
Args:
query (str): A CQL query to execute.
Returns:
List[Document]: A list of Document objects, each containing data from a row.
"""
documents = []
result = self.db.run_no_throw(query, fetch="Cursor")
for row in result:
doc_str = ", ".join([str(value) for value in row])
documents.append(Document(text=doc_str))
return documents
def cassandra_db_schema(self, keyspace: str) -> List[Document]:
"""
Input to this tool is a keyspace name, output is a table description
of Apache Cassandra tables.
If the query is not correct, an error message will be returned.
If an error is returned, report back to the user that the keyspace
doesn't exist and stop.
Args:
keyspace (str): The name of the keyspace for which to return the schema.
Returns:
List[Document]: A list of Document objects, each containing a table description.
"""
return [Document(text=self.db.get_keyspace_tables_str(keyspace))]
def cassandra_db_select_table_data(
self, keyspace: str, table: str, predicate: str, limit: int
) -> List[Document]:
"""
Tool for getting data from a table in an Apache Cassandra database.
Use the WHERE clause to specify the predicate for the query that uses the
primary key. A blank predicate will return all rows. Avoid this if possible.
Use the limit to specify the number of rows to return. A blank limit will
return all rows.
Args:
keyspace (str): The name of the keyspace containing the table.
table (str): The name of the table for which to return data.
predicate (str): The predicate for the query that uses the primary key.
limit (int): The maximum number of rows to return.
Returns:
List[Document]: A list of Document objects, each containing a row of data.
"""
return [
Document(text=self.db.get_table_data(keyspace, table, predicate, limit))
]
|
"""Tools for interacting with an Apache Cassandra database."""
from typing import List
from llama_index.core.bridge.pydantic import Field
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.tools.cassandra.cassandra_database_wrapper import (
CassandraDatabase,
)
class CassandraDatabaseToolSpec(BaseToolSpec):
"""Base tool for interacting with an Apache Cassandra database."""
db: CassandraDatabase = Field(exclude=True)
spec_functions = [
"cassandra_db_query",
"cassandra_db_schema",
"cassandra_db_select_table_data",
]
def __init__(self, db: CassandraDatabase) -> None:
"""DB session in context."""
self.db = db
def cassandra_db_query(self, query: str) -> List[Document]:
"""
Execute a CQL query and return the results as a list of Documents.
Args:
query (str): A CQL query to execute.
Returns:
List[Document]: A list of Document objects, each containing data from a row.
"""
documents = []
result = self.db.run_no_throw(query, fetch="Cursor")
for row in result:
doc_str = ", ".join([str(value) for value in row])
documents.append(Document(text=doc_str))
return documents
def cassandra_db_schema(self, keyspace: str) -> List[Document]:
"""
Input to this tool is a keyspace name, output is a table description
of Apache Cassandra tables.
If the query is not correct, an error message will be returned.
If an error is returned, report back to the user that the keyspace
doesn't exist and stop.
Args:
keyspace (str): The name of the keyspace for which to return the schema.
Returns:
List[Document]: A list of Document objects, each containing a table description.
"""
return [Document(text=self.db.get_keyspace_tables_str(keyspace))]
def cassandra_db_select_table_data(
self, keyspace: str, table: str, predicate: str, limit: int
) -> List[Document]:
"""
Tool for getting data from a table in an Apache Cassandra database.
Use the WHERE clause to specify the predicate for the query that uses the
primary key. A blank predicate will return all rows. Avoid this if possible.
Use the limit to specify the number of rows to return. A blank limit will
return all rows.
Args:
keyspace (str): The name of the keyspace containing the table.
table (str): The name of the table for which to return data.
predicate (str): The predicate for the query that uses the primary key.
limit (int): The maximum number of rows to return.
Returns:
List[Document]: A list of Document objects, each containing a row of data.
"""
return [
Document(text=self.db.get_table_data(keyspace, table, predicate, limit))
]
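# A hypothetical wiring sketch (the CassandraDatabase constructor arguments
# and the keyspace name are assumptions; requires a reachable cluster):
if __name__ == "__main__":
    db = CassandraDatabase()  # assumed default: connect to a local cluster
    spec = CassandraDatabaseToolSpec(db=db)
    tools = spec.to_tool_list()  # expose the three spec_functions as tools
    for doc in spec.cassandra_db_schema("my_keyspace"):
        print(doc.text)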
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from packaging.version import Version, parse
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
from model2vec import __version__ as M2V_VERSION
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
expected_shape = (29525 if parse(M2V_VERSION) >= Version("0.5.0") else 29528, 32)
assert model.embedding.weight.shape == expected_shape
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from __future__ import annotations
import math
from pathlib import Path
import numpy as np
import pytest
from tokenizers import Tokenizer
from sentence_transformers import SentenceTransformer
from sentence_transformers.models.StaticEmbedding import StaticEmbedding
try:
import model2vec
except ImportError:
model2vec = None
skip_if_no_model2vec = pytest.mark.skipif(model2vec is None, reason="The model2vec library is not installed.")
@pytest.fixture(scope="session")
def tokenizer() -> Tokenizer:
return Tokenizer.from_pretrained("bert-base-uncased")
@pytest.fixture
def embedding_weights():
return np.random.rand(30522, 768)
@pytest.fixture
def static_embedding(tokenizer: Tokenizer, embedding_weights) -> StaticEmbedding:
return StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
def test_initialization_with_embedding_weights(tokenizer: Tokenizer, embedding_weights) -> None:
model = StaticEmbedding(tokenizer, embedding_weights=embedding_weights)
assert model.embedding.weight.shape == (30522, 768)
def test_initialization_with_embedding_dim(tokenizer: Tokenizer) -> None:
model = StaticEmbedding(tokenizer, embedding_dim=768)
assert model.embedding.weight.shape == (30522, 768)
def test_tokenize(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
assert "input_ids" in tokens
assert "offsets" in tokens
def test_forward(static_embedding: StaticEmbedding) -> None:
texts = ["Hello world!", "How are you?"]
tokens = static_embedding.tokenize(texts)
output = static_embedding(tokens)
assert "sentence_embedding" in output
def test_save_and_load(tmp_path: Path, static_embedding: StaticEmbedding) -> None:
save_dir = tmp_path / "model"
save_dir.mkdir()
static_embedding.save(str(save_dir))
loaded_model = StaticEmbedding.load(str(save_dir))
assert loaded_model.embedding.weight.shape == static_embedding.embedding.weight.shape
@skip_if_no_model2vec()
def test_from_distillation() -> None:
model = StaticEmbedding.from_distillation("sentence-transformers-testing/stsb-bert-tiny-safetensors", pca_dims=32)
assert model.embedding.weight.shape == (29528, 32)
@skip_if_no_model2vec()
def test_from_model2vec() -> None:
model = StaticEmbedding.from_model2vec("minishlab/M2V_base_output")
assert model.embedding.weight.shape == (29528, 256)
def test_loading_model2vec() -> None:
model = SentenceTransformer("minishlab/potion-base-8M")
assert model.get_sentence_embedding_dimension() == 256
assert model.max_seq_length == math.inf
test_sentences = ["It's so sunny outside!", "The sun is shining outside!"]
embeddings = model.encode(test_sentences)
assert embeddings.shape == (2, 256)
similarity = model.similarity(embeddings[0], embeddings[1])
assert similarity.item() > 0.7
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import datapoints
from torchvision.prototype.transforms import functional as F, Transform
from torchvision.prototype.transforms.utils import is_simple_tensor
class LabelToOneHot(Transform):
_transformed_types = (datapoints.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: datapoints.Label, params: Dict[str, Any]) -> datapoints.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return datapoints.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: Union[PIL.Image.Image], params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> datapoints.Image:
return F.to_image_tensor(inpt) # type: ignore[no-any-return]
class ToImagePIL(Transform):
_transformed_types = (is_simple_tensor, datapoints.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
|
from typing import Any, Dict, Optional, Union
import numpy as np
import PIL.Image
import torch
from torch.nn.functional import one_hot
from torchvision.prototype import features
from torchvision.prototype.transforms import functional as F, Transform
class LabelToOneHot(Transform):
_transformed_types = (features.Label,)
def __init__(self, num_categories: int = -1):
super().__init__()
self.num_categories = num_categories
def _transform(self, inpt: features.Label, params: Dict[str, Any]) -> features.OneHotLabel:
num_categories = self.num_categories
if num_categories == -1 and inpt.categories is not None:
num_categories = len(inpt.categories)
output = one_hot(inpt.as_subclass(torch.Tensor), num_classes=num_categories)
return features.OneHotLabel(output, categories=inpt.categories)
def extra_repr(self) -> str:
if self.num_categories == -1:
return ""
return f"num_categories={self.num_categories}"
class PILToTensor(Transform):
_transformed_types = (PIL.Image.Image,)
def _transform(self, inpt: Union[PIL.Image.Image], params: Dict[str, Any]) -> torch.Tensor:
return F.pil_to_tensor(inpt)
class ToImageTensor(Transform):
_transformed_types = (features.is_simple_tensor, PIL.Image.Image, np.ndarray)
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> features.Image:
return F.to_image_tensor(inpt) # type: ignore[no-any-return]
class ToImagePIL(Transform):
_transformed_types = (features.is_simple_tensor, features.Image, np.ndarray)
def __init__(self, mode: Optional[str] = None) -> None:
super().__init__()
self.mode = mode
def _transform(
self, inpt: Union[torch.Tensor, PIL.Image.Image, np.ndarray], params: Dict[str, Any]
) -> PIL.Image.Image:
return F.to_image_pil(inpt, mode=self.mode)
# We changed the name to align them with the new naming scheme. Still, `ToPILImage` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
ToPILImage = ToImagePIL
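# A minimal usage sketch for LabelToOneHot (the categories are illustrative;
# the prototype Label API is assumed to accept them as a keyword):
if __name__ == "__main__":
    label = features.Label(torch.tensor(2), categories=["cat", "dog", "bird"])
    one_hot = LabelToOneHot()(label)
    print(one_hot)  # OneHotLabel wrapping tensor([0, 0, 1])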
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/msmarco-bm25, which contains triplet versions of MS MARCO mined with BM25.
As loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
# 2. Load the MS MARCO dataset: https://huggingface.co/datasets/sentence-transformers/msmarco-bm25
logging.info("Read the MS MARCO training dataset")
full_dataset = load_dataset("sentence-transformers/msmarco-bm25", "triplet", split="train").select(range(100000))
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
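def demo_inference():
    # Illustrative sketch (not part of the original script): load the model
    # saved in step 8 and score a query against a document. `encode_query`,
    # `encode_document`, and `similarity` are assumed from recent
    # sentence-transformers releases.
    trained = SparseEncoder("models/splade-distilbert-base-uncased-msmarco-mrl/final")
    q_emb = trained.encode_query(["what causes the northern lights"])
    d_emb = trained.encode_document(["Auroras occur when solar particles hit the upper atmosphere."])
    print(trained.similarity(q_emb, d_emb))  # sparse dot-product score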
|
"""
This script demonstrates how to train a Sparse Encoder model for Information Retrieval.
As dataset, we use sentence-transformers/quora-duplicates, which provides (anchor, positive, negative) question triplets.
As loss function, we use MultipleNegativesRankingLoss wrapped in SpladeLoss.
"""
import logging
import traceback
from datasets import load_dataset
from sentence_transformers import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.sparse_encoder import evaluation, losses
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
def main():
model_name = "distilbert/distilbert-base-uncased"
train_batch_size = 12
num_epochs = 1
lambda_query = 5e-5
lambda_corpus = 3e-5
learning_rate = 2e-5
# 1. Define our SparseEncoder model
model = SparseEncoder(
model_name,
model_card_data=SparseEncoderModelCardData(
language="en",
license="apache-2.0",
model_name="splade-distilbert-base-uncased trained on Quora Duplicates Questions",
),
)
model.max_seq_length = 256 # Set the max sequence length to 256 for the training
logging.info("Model max length: %s", model.max_seq_length)
    # 2. Load the Quora Duplicates dataset: https://huggingface.co/datasets/sentence-transformers/quora-duplicates
    logging.info("Read the Quora Duplicates training dataset")
full_dataset = load_dataset("sentence-transformers/quora-duplicates", "triplet", split="train").select(
range(100000)
)
dataset_dict = full_dataset.train_test_split(test_size=1_000, seed=12)
train_dataset = dataset_dict["train"]
eval_dataset = dataset_dict["test"]
logging.info(train_dataset)
logging.info(eval_dataset)
# 3. Define our training loss
loss = losses.SpladeLoss(
model=model,
loss=losses.SparseMultipleNegativesRankingLoss(model=model),
lambda_query=lambda_query, # Weight for query loss
lambda_corpus=lambda_corpus, # Weight for document loss
)
# 4. Define the evaluator. We use the SparseNanoBEIREvaluator, which is a light-weight evaluator for English
evaluator = evaluation.SparseNanoBEIREvaluator(
dataset_names=["msmarco", "nfcorpus", "nq"], batch_size=train_batch_size
)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"splade-{short_model_name}-msmarco-mrl"
args = SparseEncoderTrainingArguments(
# Required parameter:
output_dir=f"models/{run_name}",
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
learning_rate=learning_rate,
load_best_model_at_end=True,
metric_for_best_model="eval_NanoBEIR_mean_dot_ndcg@10",
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=1650,
save_strategy="steps",
save_steps=1650,
save_total_limit=2,
logging_steps=200,
run_name=run_name, # Will be used in W&B if `wandb` is installed
seed=42,
)
# 6. Create the trainer & start training
trainer = SparseEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=evaluator,
)
trainer.train()
# 7. Evaluate the final model, using the complete NanoBEIR dataset
test_evaluator = evaluation.SparseNanoBEIREvaluator(show_progress_bar=True, batch_size=train_batch_size)
test_evaluator(model)
# 8. Save the final model
final_output_dir = f"models/{run_name}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SparseEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
if __name__ == "__main__":
main()
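def demo_decode():
    # Illustrative sketch (not part of the original script): inspect which
    # vocabulary terms the trained SparseEncoder activates for a query.
    # `decode`, which maps a sparse vector back to (token, weight) pairs, is
    # assumed from recent sentence-transformers releases; the path assumes the
    # run_name above.
    trained = SparseEncoder("models/splade-distilbert-base-uncased-quora-mrl/final")
    emb = trained.encode_query("How do I learn to play guitar?")
    for token, weight in trained.decode(emb, top_k=10):
        print(token, round(float(weight), 3))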
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
            result (Tensor or tuple): The results to draw over `img`,
                either bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
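# Usage sketch (illustrative addition): running this detector through mmdet's
# high-level 2.x APIs and rendering detections via the `show_result` override
# above. Config/checkpoint paths are placeholders.
def demo_show_result():
    from mmdet.apis import inference_detector, init_detector

    config_file = 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
    checkpoint_file = 'checkpoints/cascade_rcnn_r50_fpn_1x_coco.pth'
    model = init_detector(config_file, checkpoint_file, device='cuda:0')
    result = inference_detector(model, 'demo/demo.jpg')
    model.show_result('demo/demo.jpg', result, out_file='cascade_rcnn_result.jpg')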
|
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector.
Args:
data (str or np.ndarray): Image filename or loaded image.
            result (Tensor or tuple): The results to draw over `img`,
                either bbox_result or (bbox_result, segm_result).
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
|
"""Pydantic v1 compatibility shim."""
from pydantic.v1.main import * # noqa: F403
from langchain_core._api import warn_deprecated
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
|
"""Pydantic v1 compatibility shim."""
from langchain_core._api import warn_deprecated
try:
from pydantic.v1.main import * # noqa: F403
except ImportError:
from pydantic.main import * # type: ignore[assignment,no-redef] # noqa: F403
warn_deprecated(
"0.3.0",
removal="1.0.0",
alternative="pydantic.v1 or pydantic",
message=(
"As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
"The langchain_core.pydantic_v1 module was a "
"compatibility shim for pydantic v1, and should no longer be used. "
"Please update the code to import from Pydantic directly.\n\n"
"For example, replace imports like: "
"`from langchain_core.pydantic_v1 import BaseModel`\n"
"with: `from pydantic import BaseModel`\n"
"or the v1 compatibility namespace if you are working in a code base "
"that has not been fully upgraded to pydantic 2 yet. "
"\tfrom pydantic.v1 import BaseModel\n"
),
)
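def _migration_example():
    # Illustrative sketch: what the deprecation message above asks callers to
    # do. Import directly from pydantic (v2) instead of this shim, or use
    # `from pydantic.v1 import BaseModel` for code that still needs v1 semantics.
    from pydantic import BaseModel

    class Example(BaseModel):
        name: str

    return Example(name="ok")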
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"]),
)
except JSONDecodeError as e:
msg = (
f"Could not parse tool input: {function} because "
f"the `arguments` is not valid JSON."
)
raise OutputParserException(msg) from e
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
tool_input = _tool_input.get("__arg1", _tool_input)
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
),
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
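def _demo_parse():
    # Illustrative sketch: feeding a tool-calling AIMessage through the parser
    # above. The tool name, arguments, and id are made up for the example.
    ai_msg = AIMessage(
        content="",
        tool_calls=[{"name": "search", "args": {"query": "weather"}, "id": "call_1"}],
    )
    for action in parse_ai_message_to_tool_action(ai_msg):
        print(action.tool, action.tool_input)  # search {'query': 'weather'}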
|
import json
from json import JSONDecodeError
from typing import Union
from langchain_core.agents import AgentAction, AgentActionMessageLog, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
class ToolAgentAction(AgentActionMessageLog):
tool_call_id: str
"""Tool call that this message is responding to."""
def parse_ai_message_to_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
if not isinstance(message, AIMessage):
msg = f"Expected an AI message got {type(message)}"
raise TypeError(msg)
actions: list = []
if message.tool_calls:
tool_calls = message.tool_calls
else:
if not message.additional_kwargs.get("tool_calls"):
return AgentFinish(
return_values={"output": message.content},
log=str(message.content),
)
# Best-effort parsing
tool_calls = []
for tool_call in message.additional_kwargs["tool_calls"]:
function = tool_call["function"]
function_name = function["name"]
try:
args = json.loads(function["arguments"] or "{}")
tool_calls.append(
ToolCall(name=function_name, args=args, id=tool_call["id"]),
)
            except JSONDecodeError as e:
                msg = (
                    f"Could not parse tool input: {function} because "
                    f"the `arguments` is not valid JSON."
                )
                raise OutputParserException(msg) from e
for tool_call in tool_calls:
# HACK HACK HACK:
# The code that encodes tool input into Open AI uses a special variable
# name called `__arg1` to handle old style tools that do not expose a
# schema and expect a single string argument as an input.
# We unpack the argument here if it exists.
# Open AI does not support passing in a JSON array as an argument.
function_name = tool_call["name"]
_tool_input = tool_call["args"]
tool_input = _tool_input.get("__arg1", _tool_input)
content_msg = f"responded: {message.content}\n" if message.content else "\n"
log = f"\nInvoking: `{function_name}` with `{tool_input}`\n{content_msg}\n"
actions.append(
ToolAgentAction(
tool=function_name,
tool_input=tool_input,
log=log,
message_log=[message],
tool_call_id=tool_call["id"],
),
)
return actions
class ToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "tools-agent-output-parser"
def parse_result(
self,
result: list[Generation],
*,
partial: bool = False,
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
msg = "This output parser only works on ChatGeneration output"
raise ValueError(msg)
message = result[0].message
return parse_ai_message_to_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
msg = "Can only parse messages"
raise ValueError(msg)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to set up DTensor backend in tests."""
# LINT.IfChange
import multiprocessing
import os
from tensorflow.dtensor.python import accelerator_util
from tensorflow.dtensor.python.tests.test_backend_name import DTENSOR_TEST_UTIL_BACKEND
from tensorflow.python.platform import test as tf_test
class DTensorTestBackendConfigurator:
"""Configurate test backends."""
def __init__(self, test_case: tf_test.TestCase):
self._test_case = test_case
# TODO(b/260771689): Refactor common backend set up logic to here.
def tearDown(self):
    # Only need to explicitly shut down the TPU system in TFRT; in the current
    # runtime, the shutdown is handled as part of the initialization process.
if accelerator_util.is_initialized():
accelerator_util.shutdown_accelerator_system()
def slice_host_devices_for_multiworker(num_clients, client_id, ports):
"""Configure the current process to only use a slice of devices."""
if num_clients == 0:
# All GPUs are visible to the client.
del os.environ['CUDA_VISIBLE_DEVICES']
del os.environ['HIP_VISIBLE_DEVICES']
else:
# Make the client_id-th GPU visible to the client.
os.environ['CUDA_VISIBLE_DEVICES'] = f'{client_id}'
os.environ['HIP_VISIBLE_DEVICES'] = f'{client_id}'
# Make the client_id-th (4x) TPU cores visible to the client.
os.environ['CLOUD_TPU_TASK_ID'] = f'{client_id}'
if 'tpu' in DTENSOR_TEST_UTIL_BACKEND.value:
del ports # Unused due to lack of implementation.
    # We need to find out if there is a way to slice a Cloud TPU host into
    # multiple workers.
raise NotImplementedError(
'OSS multi-client tests of TPU is not supported.'
)
def get_mp_context():
return multiprocessing.get_context('forkserver')
def handle_test_main(main, *args, **kwargs):
main(*args, **kwargs)
# LINT.ThenChange(test_backend_util.py)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility to set up DTensor backend in tests."""
# LINT.IfChange
import multiprocessing
import os
from tensorflow.dtensor.python import accelerator_util
from tensorflow.dtensor.python import config
from tensorflow.dtensor.python import layout as layout_lib
from tensorflow.dtensor.python.tests.test_backend_name import DTENSOR_TEST_UTIL_BACKEND
from tensorflow.python.platform import test as tf_test
class DTensorTestBackendConfigurator:
"""Configurate test backends."""
def __init__(self, test_case: tf_test.TestCase):
self._test_case = test_case
# TODO(b/260771689): Refactor common backend set up logic to here.
def tearDown(self):
    # Only need to explicitly shut down the TPU system in TFRT; in the current
    # runtime, the shutdown is handled as part of the initialization process.
if accelerator_util.is_initialized():
accelerator_util.shutdown_accelerator_system()
def config_test_mesh(mesh: layout_lib.Mesh):
"""No Op.
Args:
mesh: The DTensor mesh.
"""
if config.backend_is_pw():
del mesh
def slice_host_devices_for_multiworker(num_clients, client_id, ports):
"""Configure the current process to only use a slice of devices."""
if num_clients == 0:
# All GPUs are visible to the client.
del os.environ['CUDA_VISIBLE_DEVICES']
del os.environ['HIP_VISIBLE_DEVICES']
else:
# Make the client_id-th GPU visible to the client.
os.environ['CUDA_VISIBLE_DEVICES'] = f'{client_id}'
os.environ['HIP_VISIBLE_DEVICES'] = f'{client_id}'
# Make the client_id-th (4x) TPU cores visible to the client.
os.environ['CLOUD_TPU_TASK_ID'] = f'{client_id}'
if 'tpu' in DTENSOR_TEST_UTIL_BACKEND.value:
del ports # Unused due to lack of implementation.
    # We need to find out if there is a way to slice a Cloud TPU host into
    # multiple workers.
raise NotImplementedError(
'OSS multi-client tests of TPU is not supported.'
)
def get_mp_context():
return multiprocessing.get_context('forkserver')
def handle_test_main(main, *args, **kwargs):
main(*args, **kwargs)
# LINT.ThenChange(test_backend_util.py)
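# Illustrative sketch (not part of the original file): how a two-client test
# might combine the helpers above. `_client_main` is a hypothetical worker; the
# forkserver context from `get_mp_context` avoids inheriting initialized
# accelerator state from the parent process.
def _client_main(client_id):
  slice_host_devices_for_multiworker(num_clients=2, client_id=client_id, ports=None)

def _demo_spawn_clients():
  ctx = get_mp_context()
  procs = [ctx.Process(target=_client_main, args=(i,)) for i in range(2)]
  for p in procs:
    p.start()
  for p in procs:
    p.join()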
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
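# Illustrative sketch (not part of the config): this file is consumed by
# mmengine's Config loader, typically from an mmdetection checkout so the
# `_base_` files resolve. The path below is a placeholder.
#
#     from mmengine.config import Config
#     cfg = Config.fromfile('configs/convnext/this_config.py')
#     print(cfg.model.backbone.type)  # mmcls.ConvNeXt
#     print(cfg.optim_wrapper.type)   # AmpOptimWrapper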
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# please install mmcls>=1.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-tiny_3rdparty_32xb128-noema_in1k_20220301-795e9634.pth' # noqa
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='tiny',
out_indices=[0, 1, 2, 3],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
gap_before_final_norm=False,
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[96, 192, 384, 768]))
# augmentation strategy originates from DETR / Sparse RCNN
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise',
'num_layers': 6
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
|
from deprecated import deprecated
from typing import Optional
from .workflow import Workflow
from .events import StartEvent, StopEvent
from .decorators import StepConfig
from .utils import get_steps_from_class, get_steps_from_instance
@deprecated(
reason="Install `llama-index-utils-workflow` and use the import `from llama_index.utils.workflow` instead."
)
def draw_all_possible_flows(
workflow: Workflow,
filename: str = "workflow_all_flows.html",
notebook: bool = False,
) -> None:
"""Draws all possible flows of the workflow."""
from pyvis.network import Network
net = Network(directed=True, height="750px", width="100%")
# Add the nodes + edge for stop events
net.add_node(
StopEvent.__name__,
label=StopEvent.__name__,
color="#FFA07A",
shape="ellipse",
)
net.add_node("_done", label="_done", color="#ADD8E6", shape="box")
net.add_edge(StopEvent.__name__, "_done")
# Add nodes from all steps
steps = get_steps_from_class(workflow)
if not steps:
# If no steps are defined in the class, try to get them from the instance
steps = get_steps_from_instance(workflow)
step_config: Optional[StepConfig] = None
for step_name, step_func in steps.items():
step_config = getattr(step_func, "__step_config", None)
if step_config is None:
continue
net.add_node(
step_name, label=step_name, color="#ADD8E6", shape="box"
) # Light blue for steps
for event_type in step_config.accepted_events:
net.add_node(
event_type.__name__,
label=event_type.__name__,
color="#90EE90" if event_type != StartEvent else "#E27AFF",
shape="ellipse",
) # Light green for events
# Add edges from all steps
for step_name, step_func in steps.items():
step_config = getattr(step_func, "__step_config", None)
if step_config is None:
continue
for return_type in step_config.return_types:
if return_type is not type(None):
net.add_edge(step_name, return_type.__name__)
for event_type in step_config.accepted_events:
net.add_edge(event_type.__name__, step_name)
net.show(filename, notebook=notebook)
@deprecated(
reason="Install `llama-index-utils-workflow` and use the import `from llama_index.utils.workflow` instead."
)
def draw_most_recent_execution(
workflow: Workflow,
filename: str = "workflow_recent_execution.html",
notebook: bool = False,
) -> None:
"""Draws the most recent execution of the workflow."""
from pyvis.network import Network
net = Network(directed=True, height="750px", width="100%")
# Add nodes and edges based on execution history
existing_context = next(iter(workflow._contexts), None)
if existing_context is None:
raise ValueError("No runs found in workflow")
for i, (step, event) in enumerate(existing_context._accepted_events):
event_node = f"{event}_{i}"
step_node = f"{step}_{i}"
net.add_node(
event_node, label=event, color="#90EE90", shape="ellipse"
) # Light green for events
net.add_node(
step_node, label=step, color="#ADD8E6", shape="box"
) # Light blue for steps
net.add_edge(event_node, step_node)
if i > 0:
prev_step_node = f"{existing_context._accepted_events[i - 1][0]}_{i - 1}"
net.add_edge(prev_step_node, event_node)
net.show(filename, notebook=notebook)
|
from deprecated import deprecated
from typing import Optional
from .workflow import Workflow
from .events import StartEvent, StopEvent
from .decorators import StepConfig
from .utils import get_steps_from_class, get_steps_from_instance
@deprecated(
reason="Install `llama-index-utils-workflow` and use the import `from llama_index.utils.workflow` instead."
)
def draw_all_possible_flows(
workflow: Workflow,
filename: str = "workflow_all_flows.html",
notebook: bool = False,
) -> None:
"""Draws all possible flows of the workflow."""
from pyvis.network import Network
net = Network(directed=True, height="750px", width="100%")
# Add the nodes + edge for stop events
net.add_node(
StopEvent.__name__,
label=StopEvent.__name__,
color="#FFA07A",
shape="ellipse",
)
net.add_node("_done", label="_done", color="#ADD8E6", shape="box")
net.add_edge(StopEvent.__name__, "_done")
# Add nodes from all steps
steps = get_steps_from_class(workflow)
if not steps:
# If no steps are defined in the class, try to get them from the instance
steps = get_steps_from_instance(workflow)
step_config: Optional[StepConfig] = None
for step_name, step_func in steps.items():
step_config = getattr(step_func, "__step_config", None)
if step_config is None:
continue
net.add_node(
step_name, label=step_name, color="#ADD8E6", shape="box"
) # Light blue for steps
for event_type in step_config.accepted_events:
net.add_node(
event_type.__name__,
label=event_type.__name__,
color="#90EE90" if event_type != StartEvent else "#E27AFF",
shape="ellipse",
) # Light green for events
# Add edges from all steps
for step_name, step_func in steps.items():
step_config = getattr(step_func, "__step_config", None)
if step_config is None:
continue
for return_type in step_config.return_types:
if return_type != type(None):
net.add_edge(step_name, return_type.__name__)
for event_type in step_config.accepted_events:
net.add_edge(event_type.__name__, step_name)
net.show(filename, notebook=notebook)
@deprecated(
reason="Install `llama-index-utils-workflow` and use the import `from llama_index.utils.workflow` instead."
)
def draw_most_recent_execution(
workflow: Workflow,
filename: str = "workflow_recent_execution.html",
notebook: bool = False,
) -> None:
"""Draws the most recent execution of the workflow."""
from pyvis.network import Network
net = Network(directed=True, height="750px", width="100%")
# Add nodes and edges based on execution history
existing_context = next(iter(workflow._contexts), None)
if existing_context is None:
raise ValueError("No runs found in workflow")
for i, (step, event) in enumerate(existing_context._accepted_events):
event_node = f"{event}_{i}"
step_node = f"{step}_{i}"
net.add_node(
event_node, label=event, color="#90EE90", shape="ellipse"
) # Light green for events
net.add_node(
step_node, label=step, color="#ADD8E6", shape="box"
) # Light blue for steps
net.add_edge(event_node, step_node)
if i > 0:
prev_step_node = f"{existing_context._accepted_events[i - 1][0]}_{i - 1}"
net.add_edge(prev_step_node, event_node)
net.show(filename, notebook=notebook)
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
>>> softmax_layer = keras.layers.activations.Softmax()
>>> input = np.array([1.0, 2.0, 1.0])
>>> result = softmax_layer(input)
>>> result
[0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
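def _demo_masked_softmax():
    # Illustrative sketch: masked softmax with the layer above. Masked positions
    # receive a large negative additive bias before normalization, so their
    # probabilities collapse toward zero and the remaining entries renormalize.
    import numpy as np

    x = np.array([[1.0, 2.0, 3.0]])
    mask = np.array([[True, True, False]])
    print(Softmax()(x, mask=mask))  # last position ~0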
|
from keras.src import activations
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
def _large_negative_number(dtype):
"""Return a Large negative number based on dtype."""
if backend.standardize_dtype(dtype) == "float16":
return -3e4
return -1e9
@keras_export("keras.layers.Softmax")
class Softmax(Layer):
"""Softmax activation layer.
Formula:
``` python
exp_x = exp(x - max(x))
f(x) = exp_x / sum(exp_x)
```
Example:
    >>> softmax_layer = keras.layers.activations.Softmax()
    >>> input = np.array([1.0, 2.0, 1.0])
    >>> result = softmax_layer(input)
    >>> result
    [0.21194157, 0.5761169, 0.21194157]
Args:
axis: Integer, or list of Integers, axis along which the softmax
normalization is applied.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Call arguments:
inputs: The inputs (logits) to the softmax layer.
mask: A boolean mask of the same shape as `inputs`. The mask
specifies 1 to keep and 0 to mask. Defaults to `None`.
Returns:
Softmaxed output with the same shape as `inputs`.
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
self.supports_masking = True
self.built = True
def call(self, inputs, mask=None):
if mask is not None:
adder = (
1.0 - backend.cast(mask, inputs.dtype)
) * _large_negative_number(inputs.dtype)
inputs += adder
if isinstance(self.axis, (tuple, list)):
if len(self.axis) > 1:
return backend.numpy.exp(
inputs
- backend.math.logsumexp(
inputs, axis=self.axis, keepdims=True
)
)
else:
return activations.softmax(inputs, axis=self.axis[0])
return activations.softmax(inputs, axis=self.axis)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
def compute_output_shape(self, input_shape):
return input_shape
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from .beta_decorator import (
LangChainBetaWarning,
beta,
suppress_langchain_beta_warning,
surface_langchain_beta_warnings,
)
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .path import as_import_path, get_relative_path
__all__ = (
"LangChainBetaWarning",
"LangChainDeprecationWarning",
"as_import_path",
"beta",
"deprecated",
"get_relative_path",
"suppress_langchain_beta_warning",
"suppress_langchain_deprecation_warning",
"surface_langchain_beta_warnings",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
)
_dynamic_imports = {
"LangChainBetaWarning": "beta_decorator",
"beta": "beta_decorator",
"suppress_langchain_beta_warning": "beta_decorator",
"surface_langchain_beta_warnings": "beta_decorator",
"as_import_path": "path",
"get_relative_path": "path",
"LangChainDeprecationWarning": "deprecation",
"deprecated": "deprecation",
"surface_langchain_deprecation_warnings": "deprecation",
"suppress_langchain_deprecation_warning": "deprecation",
"warn_deprecated": "deprecation",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""Helper functions for managing the LangChain API.
This module is only relevant for LangChain developers, not for users.
.. warning::
This module and its submodules are for internal use only. Do not use them
in your own code. We may change the API at any time with no warning.
"""
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from .beta_decorator import (
LangChainBetaWarning,
beta,
suppress_langchain_beta_warning,
surface_langchain_beta_warnings,
)
from .deprecation import (
LangChainDeprecationWarning,
deprecated,
suppress_langchain_deprecation_warning,
surface_langchain_deprecation_warnings,
warn_deprecated,
)
from .path import as_import_path, get_relative_path
__all__ = (
"as_import_path",
"beta",
"deprecated",
"get_relative_path",
"LangChainBetaWarning",
"LangChainDeprecationWarning",
"suppress_langchain_beta_warning",
"surface_langchain_beta_warnings",
"suppress_langchain_deprecation_warning",
"surface_langchain_deprecation_warnings",
"warn_deprecated",
)
_dynamic_imports = {
"LangChainBetaWarning": "beta_decorator",
"beta": "beta_decorator",
"suppress_langchain_beta_warning": "beta_decorator",
"surface_langchain_beta_warnings": "beta_decorator",
"as_import_path": "path",
"get_relative_path": "path",
"LangChainDeprecationWarning": "deprecation",
"deprecated": "deprecation",
"surface_langchain_deprecation_warnings": "deprecation",
"suppress_langchain_deprecation_warning": "deprecation",
"warn_deprecated": "deprecation",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
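def _demo_lazy_import() -> None:
    # Illustrative sketch: the module-level `__getattr__` above implements
    # PEP 562 lazy imports. The first attribute access triggers the real import
    # and caches the object in `globals()`, so later lookups are plain
    # module-dict hits.
    from langchain_core import _api

    _ = _api.LangChainDeprecationWarning  # resolved via __getattr__ on first use
    assert "LangChainDeprecationWarning" in vars(_api)  # cached afterwards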
|
from ._transforms import BarkScale, BarkSpectrogram, InverseBarkScale
__all__ = [
"BarkScale",
"BarkSpectrogram",
"InverseBarkScale",
]
|
from ._transforms import (
AddNoise,
BarkScale,
BarkSpectrogram,
Convolve,
Deemphasis,
FFTConvolve,
InverseBarkScale,
Preemphasis,
Speed,
SpeedPerturbation,
)
__all__ = [
"AddNoise",
"BarkScale",
"BarkSpectrogram",
"Convolve",
"Deemphasis",
"FFTConvolve",
"InverseBarkScale",
"Preemphasis",
"SpeedPerturbation",
"Speed",
]
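def _demo_bark_spectrogram():
    # Illustrative sketch (not part of the original file), assuming these
    # transforms ship under `torchaudio.prototype.transforms`; prototype APIs
    # are unstable, so the import path and constructor defaults are assumptions.
    import torch

    from torchaudio.prototype.transforms import BarkSpectrogram

    waveform = torch.randn(1, 16000)  # one second of fake audio at 16 kHz
    bark_spec = BarkSpectrogram(sample_rate=16000)(waveform)
    print(bark_spec.shape)  # (channel, n_barks, time)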
|
import os
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
import xgboost as xgb
from xgboost import callback
class SupportedTasks(object):
TRAIN = "train"
class XGBoostTrainer(Executor):
def __init__(self, server_address: str, world_size: int, server_cert_path: str,
client_key_path: str, client_cert_path: str, use_gpus: bool):
"""Trainer for federated XGBoost.
Args:
server_address: address for the gRPC server to connect to.
world_size: the number of sites.
server_cert_path: the path to the server certificate file.
client_key_path: the path to the client key file.
            client_cert_path: the path to the client certificate file.
            use_gpus: whether to run training on GPUs.
        """
super().__init__()
self._server_address = server_address
self._world_size = world_size
self._server_cert_path = server_cert_path
self._client_key_path = client_key_path
self._client_cert_path = client_cert_path
self._use_gpus = use_gpus
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Executing {task_name}")
try:
if task_name == SupportedTasks.TRAIN:
self._do_training(fl_ctx)
return make_reply(ReturnCode.OK)
else:
self.log_error(fl_ctx, f"{task_name} is not a supported task.")
return make_reply(ReturnCode.TASK_UNKNOWN)
except BaseException as e:
self.log_exception(fl_ctx,
f"Task {task_name} failed. Exception: {e.__str__()}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _do_training(self, fl_ctx: FLContext):
client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME)
rank = int(client_name.split('-')[1]) - 1
communicator_env = {
'xgboost_communicator': 'federated',
'federated_server_address': self._server_address,
'federated_world_size': self._world_size,
'federated_rank': rank,
'federated_server_cert': self._server_cert_path,
'federated_client_key': self._client_key_path,
'federated_client_cert': self._client_cert_path
}
with xgb.collective.CommunicatorContext(**communicator_env):
            # Load the file; it will not be sharded in federated mode.
if rank == 0:
label = '&label_column=0'
else:
label = ''
dtrain = xgb.DMatrix(f'higgs.train.csv?format=csv{label}', data_split_mode=1)
dtest = xgb.DMatrix(f'higgs.test.csv?format=csv{label}', data_split_mode=1)
# specify parameters via map
param = {
'validate_parameters': True,
'eta': 0.1,
'gamma': 1.0,
'max_depth': 8,
'min_child_weight': 100,
'tree_method': 'hist',
'grow_policy': 'depthwise',
'objective': 'binary:logistic',
'eval_metric': 'auc',
}
if self._use_gpus:
self.log_info(fl_ctx, f'Training with GPU {rank}')
param['device'] = f"cuda:{rank}"
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 10
bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2)
# Save the model.
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = workspace.get_run_dir(run_number)
bst.save_model(os.path.join(run_dir, "higgs.model.federated.vertical.json"))
xgb.collective.communicator_print("Finished training\n")
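def _demo_local_training():
    # Illustrative sketch (not federated): the same booster parameters trained
    # on a single site, assuming higgs.train/test.csv exist locally with the
    # label in column 0.
    dtrain = xgb.DMatrix('higgs.train.csv?format=csv&label_column=0')
    dtest = xgb.DMatrix('higgs.test.csv?format=csv&label_column=0')
    param = {'eta': 0.1, 'max_depth': 8, 'objective': 'binary:logistic', 'eval_metric': 'auc'}
    xgb.train(param, dtrain, num_boost_round=10, evals=[(dtest, 'eval')])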
|
import os
from nvflare.apis.executor import Executor
from nvflare.apis.fl_constant import FLContextKey, ReturnCode
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable, make_reply
from nvflare.apis.signal import Signal
import xgboost as xgb
from xgboost import callback
class SupportedTasks(object):
TRAIN = "train"
class XGBoostTrainer(Executor):
def __init__(self, server_address: str, world_size: int, server_cert_path: str,
client_key_path: str, client_cert_path: str, use_gpus: bool):
"""Trainer for federated XGBoost.
Args:
server_address: address for the gRPC server to connect to.
world_size: the number of sites.
server_cert_path: the path to the server certificate file.
client_key_path: the path to the client key file.
            client_cert_path: the path to the client certificate file.
            use_gpus: whether to run training on GPUs.
        """
super().__init__()
self._server_address = server_address
self._world_size = world_size
self._server_cert_path = server_cert_path
self._client_key_path = client_key_path
self._client_cert_path = client_cert_path
self._use_gpus = use_gpus
def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext,
abort_signal: Signal) -> Shareable:
self.log_info(fl_ctx, f"Executing {task_name}")
try:
if task_name == SupportedTasks.TRAIN:
self._do_training(fl_ctx)
return make_reply(ReturnCode.OK)
else:
self.log_error(fl_ctx, f"{task_name} is not a supported task.")
return make_reply(ReturnCode.TASK_UNKNOWN)
except BaseException as e:
self.log_exception(fl_ctx,
f"Task {task_name} failed. Exception: {e.__str__()}")
return make_reply(ReturnCode.EXECUTION_EXCEPTION)
def _do_training(self, fl_ctx: FLContext):
client_name = fl_ctx.get_prop(FLContextKey.CLIENT_NAME)
rank = int(client_name.split('-')[1]) - 1
communicator_env = {
'xgboost_communicator': 'federated',
'federated_server_address': self._server_address,
'federated_world_size': self._world_size,
'federated_rank': rank,
'federated_server_cert': self._server_cert_path,
'federated_client_key': self._client_key_path,
'federated_client_cert': self._client_cert_path
}
with xgb.collective.CommunicatorContext(**communicator_env):
            # Load the file; it will not be sharded in federated mode.
if rank == 0:
label = '&label_column=0'
else:
label = ''
dtrain = xgb.DMatrix(f'higgs.train.csv?format=csv{label}', data_split_mode=1)
dtest = xgb.DMatrix(f'higgs.test.csv?format=csv{label}', data_split_mode=1)
# specify parameters via map
param = {
'validate_parameters': True,
'eta': 0.1,
'gamma': 1.0,
'max_depth': 8,
'min_child_weight': 100,
'tree_method': 'hist',
'grow_policy': 'depthwise',
'objective': 'binary:logistic',
'eval_metric': 'auc',
}
            if self._use_gpus:
                self.log_info(fl_ctx, f'Training with GPU {rank}')
                param['device'] = f"cuda:{rank}"
# specify validations set to watch performance
watchlist = [(dtest, "eval"), (dtrain, "train")]
# number of boosting rounds
num_round = 10
bst = xgb.train(param, dtrain, num_round, evals=watchlist, early_stopping_rounds=2)
# Save the model.
workspace = fl_ctx.get_prop(FLContextKey.WORKSPACE_OBJECT)
run_number = fl_ctx.get_prop(FLContextKey.CURRENT_RUN)
run_dir = workspace.get_run_dir(run_number)
bst.save_model(os.path.join(run_dir, "higgs.model.federated.vertical.json"))
xgb.collective.communicator_print("Finished training\n")
|
from typing import Union, Dict, Any
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
try:
# When the response contains only a function call, the library
# raises an exception.
# The easiest way to detect this is to try access the text attribute and
# catch the exception.
# https://github.com/google-gemini/generative-ai-python/issues/670
text = response.text
except (ValueError, AttributeError):
text = None
additional_kwargs: Dict[str, Any] = {}
for part in response.parts:
if fn := part.function_call:
if "tool_calls" not in additional_kwargs:
additional_kwargs["tool_calls"] = []
additional_kwargs["tool_calls"].append(fn)
return ChatResponse(
message=ChatMessage(
role=role, content=text, additional_kwargs=additional_kwargs
),
raw=raw,
additional_kwargs=additional_kwargs,
)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
for block in message.blocks:
if isinstance(block, TextBlock):
parts.append(block.text)
elif isinstance(block, ImageBlock):
            image_bytes = block.resolve_image(as_base64=False).read()
            parts.append(
                {
                    "mime_type": block.image_mimetype,
                    "data": image_bytes,
                }
            )
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
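def _demo_chat_message_to_gemini():
    # Illustrative sketch: converting a plain-text ChatMessage with
    # `chat_message_to_gemini` above.
    from llama_index.core.base.llms.types import MessageRole

    example = ChatMessage(role=MessageRole.USER, content="Describe this image, please.")
    print(chat_message_to_gemini(example))
    # expected shape: {'role': 'user', 'parts': ['Describe this image, please.']}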
|
from typing import Union
import google.ai.generativelanguage as glm
import google.generativeai as genai
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
ImageBlock,
TextBlock,
)
from llama_index.core.multi_modal_llms.base import ChatMessage
from llama_index.core.utilities.gemini_utils import ROLES_FROM_GEMINI, ROLES_TO_GEMINI
def _error_if_finished_early(candidate: "glm.Candidate") -> None: # type: ignore[name-defined] # only until release
if (finish_reason := candidate.finish_reason) > 1: # 1=STOP (normally)
reason = finish_reason.name
# Safety reasons have more detail, so include that if we can.
if finish_reason == 3: # 3=Safety
relevant_safety = list(
filter(
lambda sr: sr.probability > 1, # 1=Negligible
candidate.safety_ratings,
)
)
reason += f" {relevant_safety}"
raise RuntimeError(f"Response was terminated early: {reason}")
def completion_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> CompletionResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
return CompletionResponse(text=response.text, raw=raw)
def chat_from_gemini_response(
response: Union[
"genai.types.GenerateContentResponse",
"genai.types.AsyncGenerateContentResponse",
],
) -> ChatResponse:
top_candidate = response.candidates[0]
_error_if_finished_early(top_candidate)
raw = {
**(type(top_candidate).to_dict(top_candidate)), # type: ignore
**(type(response.prompt_feedback).to_dict(response.prompt_feedback)), # type: ignore
}
if response.usage_metadata:
raw["usage_metadata"] = type(response.usage_metadata).to_dict(
response.usage_metadata
)
role = ROLES_FROM_GEMINI[top_candidate.content.role]
return ChatResponse(message=ChatMessage(role=role, content=response.text), raw=raw)
def chat_message_to_gemini(message: ChatMessage) -> "genai.types.ContentDict":
"""Convert ChatMessages to Gemini-specific history, including ImageDocuments."""
parts = []
content_txt = ""
for block in message.blocks:
if isinstance(block, TextBlock):
parts.append(block.text)
elif isinstance(block, ImageBlock):
            image_bytes = block.resolve_image(as_base64=False).read()
            parts.append(
                {
                    "mime_type": block.image_mimetype,
                    "data": image_bytes,
                }
            )
else:
msg = f"Unsupported content block type: {type(block).__name__}"
raise ValueError(msg)
return {
"role": ROLES_TO_GEMINI[message.role],
"parts": parts,
}
|
import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocol=protocol).add()
    with f:
        dry_run = f.dry_run()
    # Once the Flow is closed, the dry run is expected to fail.
    dry_run_negative = f.dry_run()
    assert dry_run
    assert not dry_run_negative
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
@pytest.mark.parametrize('show_table', [True, False])
def test_profiling(protocol, show_table):
f = Flow(protocol=protocol).add(name='hello').add(name='world')
with f:
results = f.profiling(show_table=show_table)
assert results
assert 'hello' in results
assert 'world' in results
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_profiling_async(protocol):
f = Flow(protocol=protocol, asyncio=True).add(name='hello').add(name='world')
with f:
results = await f.profiling()
assert results
assert 'hello' in results
assert 'world' in results
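def _demo_profiling():
    # Illustrative sketch of what `test_profiling` exercises, outside pytest:
    # `profiling` is expected to return a per-Executor latency mapping keyed by
    # deployment name.
    flow = Flow(protocol='grpc').add(name='hello').add(name='world')
    with flow:
        print(flow.profiling(show_table=False))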
|
import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocol=protocol).add()
    with f:
        dry_run = f.dry_run()
    # Once the Flow is closed, the dry run is expected to fail.
    dry_run_negative = f.dry_run()
    assert dry_run
    assert not dry_run_negative
|
"""Test Self-hosted LLMs."""
import pickle
from typing import Any, List, Optional
from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
model_reqs = ["pip:./", "transformers", "torch"]
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
def test_self_hosted_huggingface_pipeline_text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="gpt2",
task="text-generation",
model_kwargs={"n_positions": 1024},
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text2text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-small",
task="text2text-generation",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_summarization() -> None:
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="facebook/bart-large-cnn",
task="summarization",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def load_pipeline() -> Any:
"""Load pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
return pipe
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Inference function for testing."""
return pipeline(prompt)[0]["generated_text"]
def test_init_with_local_pipeline() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(
pipeline=pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_init_with_pipeline_path() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
gpu, path="models"
)
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def test_init_with_pipeline_fn() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
|
"""Test Self-hosted LLMs."""
import pickle
from typing import Any, List, Optional
from langchain_community.llms import SelfHostedHuggingFaceLLM, SelfHostedPipeline
model_reqs = ["pip:./", "transformers", "torch"]
def get_remote_instance() -> Any:
"""Get remote instance for testing."""
import runhouse as rh
return rh.cluster(name="rh-a10x", instance_type="A100:1", use_spot=False)
def test_self_hosted_huggingface_pipeline_text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="gpt2",
task="text-generation",
model_kwargs={"n_positions": 1024},
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_text2text_generation() -> None:
"""Test valid call to self-hosted HuggingFace text2text generation model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-small",
task="text2text-generation",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_self_hosted_huggingface_pipeline_summarization() -> None:
"""Test valid call to self-hosted HuggingFace summarization model."""
gpu = get_remote_instance()
llm = SelfHostedHuggingFaceLLM(
model_id="facebook/bart-large-cnn",
task="summarization",
hardware=gpu,
model_reqs=model_reqs,
)
output = llm.invoke("Say foo:")
assert isinstance(output, str)
def load_pipeline() -> Any:
"""Load pipeline for testing."""
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer, max_new_tokens=10
)
return pipe
def inference_fn(pipeline: Any, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Inference function for testing."""
return pipeline(prompt)[0]["generated_text"]
def test_init_with_local_pipeline() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
llm = SelfHostedPipeline.from_pipeline(
pipeline=pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_path() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
pipeline = load_pipeline()
import runhouse as rh
rh.blob(pickle.dumps(pipeline), path="models/pipeline.pkl").save().to(
gpu, path="models"
)
llm = SelfHostedPipeline.from_pipeline(
pipeline="models/pipeline.pkl",
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
def test_init_with_pipeline_fn() -> None:
"""Test initialization with a self-hosted HF pipeline."""
gpu = get_remote_instance()
llm = SelfHostedPipeline(
model_load_fn=load_pipeline,
hardware=gpu,
model_reqs=model_reqs,
inference_fn=inference_fn,
)
output = llm.invoke("Say foo:") # type: ignore
assert isinstance(output, str)
|
from typing import TYPE_CHECKING, TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
if TYPE_CHECKING:
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(
self: T, samples: int, multiple_geometries: bool = False
) -> 'PointsAndColors':
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
from docarray.documents.point_cloud.points_and_colors import PointsAndColors
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
points = parse_obj_as(NdArray, point_cloud)
return PointsAndColors(points=points, colors=None)
def display(self, samples: int = 10000) -> None:
"""
Plot point cloud from url.
To use this you need to install trimesh[easy]: `pip install 'trimesh[easy]'`.
First, it loads the point cloud into a :class:`PointsAndColors` object, and then
calls display on it. The following is therefore equivalent:
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.documents import PointCloud3D
pc = PointCloud3D("toydata/tetrahedron.obj")
# option 1
pc.url.display()
# option 2 (equivalent)
pc.url.load(samples=10000).display()
:param samples: number of points to sample from the mesh.
"""
self.load(samples=samples).display()
|
from typing import TypeVar
import numpy as np
from pydantic import parse_obj_as
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.url.url_3d.url_3d import Url3D
T = TypeVar('T', bound='PointCloud3DUrl')
@_register_proto(proto_type_name='point_cloud_url')
class PointCloud3DUrl(Url3D):
"""
URL to a .obj, .glb, or .ply file containing point cloud information.
Can be remote (web) URL, or a local file path.
"""
def load(self: T, samples: int, multiple_geometries: bool = False) -> NdArray:
"""
Load the data from the url into an NdArray containing point cloud information.
EXAMPLE USAGE
.. code-block:: python
import numpy as np
from docarray import BaseDocument
from docarray.typing import PointCloud3DUrl
class MyDoc(BaseDocument):
            point_cloud_url: PointCloud3DUrl
doc = MyDoc(point_cloud_url="toydata/tetrahedron.obj")
point_cloud = doc.point_cloud_url.load(samples=100)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (100, 3)
:param samples: number of points to sample from the mesh
:param multiple_geometries: if False, store point cloud in 2D np.ndarray.
If True, store point clouds from multiple geometries in 3D np.ndarray.
:return: np.ndarray representing the point cloud
"""
if multiple_geometries:
# try to coerce everything into a scene
scene = self._load_trimesh_instance(force='scene')
point_cloud = np.stack(
[np.array(geo.sample(samples)) for geo in scene.geometry.values()],
axis=0,
)
else:
# combine a scene into a single mesh
mesh = self._load_trimesh_instance(force='mesh')
point_cloud = np.array(mesh.sample(samples))
return parse_obj_as(NdArray, point_cloud)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
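    Example (a minimal usage sketch; exact outputs depend on the backend and seed):

    >>> import numpy as np
    >>> from keras import layers
    >>> layer = layers.Dropout(rate=0.5, seed=0)
    >>> x = np.ones((2, 4))
    >>> y = layer(x, training=True)   # roughly half the units zeroed, rest scaled by 2
    >>> z = layer(x, training=False)  # inference: input returned unchanged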
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = noise_shape
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .opus decoding is supported if libsndfile >= 1.0.31:
# AUDIO_EXTENSIONS.extend([".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
"""Builder Config for AudioFolder."""
drop_labels: bool = None
drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
BASE_FEATURE = datasets.Audio
BASE_COLUMN_NAME = "audio"
BUILDER_CONFIG_CLASS = AudioFolderConfig
EXTENSIONS: List[str] # definition at the bottom of the script
CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
# Obtained with:
# ```
# import soundfile as sf
#
# AUDIO_EXTENSIONS = [f".{format.lower()}" for format in sf.available_formats().keys()]
#
# # .mp3 is currently decoded via `torchaudio`, .opus decoding is supported if version of `libsndfile` >= 1.0.30:
# AUDIO_EXTENSIONS.extend([".mp3", ".opus"])
# ```
# We intentionally do not run this code on launch because:
# (1) Soundfile is an optional dependency, so importing it in global namespace is not allowed
# (2) To ensure the list of supported extensions is deterministic
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
|
import pytest
from llama_index.llms.bedrock_converse.utils import get_model_name
from io import BytesIO
from unittest.mock import MagicMock, patch
from llama_index.core.base.llms.types import (
AudioBlock,
ImageBlock,
MessageRole,
TextBlock,
)
from llama_index.llms.bedrock_converse.utils import (
__get_img_format_from_image_mimetype,
_content_block_to_bedrock_format,
)
def test_get_model_name_translates_us():
assert (
get_model_name("us.meta.llama3-2-3b-instruct-v1:0")
== "meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_cn():
assert (
get_model_name("cn.meta.llama3-2-3b-instruct-v1:0")
== "cn.meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_unsupported():
assert get_model_name("cohere.command-r-plus-v1:0") == "cohere.command-r-plus-v1:0"
def test_get_model_name_throws_inference_profile_exception():
with pytest.raises(ValueError):
assert get_model_name("us.cohere.command-r-plus-v1:0")
def test_get_img_format_jpeg():
assert __get_img_format_from_image_mimetype("image/jpeg") == "jpeg"
def test_get_img_format_png():
assert __get_img_format_from_image_mimetype("image/png") == "png"
def test_get_img_format_gif():
assert __get_img_format_from_image_mimetype("image/gif") == "gif"
def test_get_img_format_webp():
assert __get_img_format_from_image_mimetype("image/webp") == "webp"
def test_get_img_format_unsupported(caplog):
result = __get_img_format_from_image_mimetype("image/unsupported")
assert result == "png"
assert "Unsupported image mimetype" in caplog.text
def test_content_block_to_bedrock_format_text():
text_block = TextBlock(text="Hello, world!")
result = _content_block_to_bedrock_format(text_block, MessageRole.USER)
assert result == {"text": "Hello, world!"}
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_user(mock_resolve):
mock_bytes = BytesIO(b"fake_image_data")
mock_bytes.read = MagicMock(return_value=b"fake_image_data")
mock_resolve.return_value = mock_bytes
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.USER)
assert "image" in result
assert result["image"]["format"] == "png"
assert "bytes" in result["image"]["source"]
mock_resolve.assert_called_once()
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_assistant(mock_resolve, caplog):
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.ASSISTANT)
assert result is None
assert "only supports image blocks for user messages" in caplog.text
mock_resolve.assert_not_called()
def test_content_block_to_bedrock_format_audio(caplog):
audio_block = AudioBlock(audio=b"test_audio")
result = _content_block_to_bedrock_format(audio_block, MessageRole.USER)
assert result is None
assert "Audio blocks are not supported" in caplog.text
def test_content_block_to_bedrock_format_unsupported(caplog):
unsupported_block = object()
result = _content_block_to_bedrock_format(unsupported_block, MessageRole.USER)
assert result is None
assert "Unsupported block type" in caplog.text
assert str(type(unsupported_block)) in caplog.text
|
import pytest
from llama_index.llms.bedrock_converse.utils import get_model_name
from io import BytesIO
from unittest.mock import MagicMock, patch
from llama_index.core.base.llms.types import (
AudioBlock,
ImageBlock,
MessageRole,
TextBlock,
)
from llama_index.llms.bedrock_converse.utils import (
__get_img_format_from_image_mimetype,
_content_block_to_bedrock_format,
)
def test_get_model_name_translates_us():
assert (
get_model_name("us.meta.llama3-2-3b-instruct-v1:0")
== "meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_cn():
assert (
get_model_name("cn.meta.llama3-2-3b-instruct-v1:0")
== "cn.meta.llama3-2-3b-instruct-v1:0"
)
def test_get_model_name_does_not_translate_unsupported():
assert get_model_name("cohere.command-r-plus-v1:0") == "cohere.command-r-plus-v1:0"
def test_get_model_name_throws_inference_profile_exception():
with pytest.raises(ValueError):
assert get_model_name("us.cohere.command-r-plus-v1:0")
def test_get_img_format_jpeg():
assert __get_img_format_from_image_mimetype("image/jpeg") == "jpeg"
def test_get_img_format_png():
assert __get_img_format_from_image_mimetype("image/png") == "png"
def test_get_img_format_gif():
assert __get_img_format_from_image_mimetype("image/gif") == "gif"
def test_get_img_format_webp():
assert __get_img_format_from_image_mimetype("image/webp") == "webp"
def test_get_img_format_unsupported(caplog):
result = __get_img_format_from_image_mimetype("image/unsupported")
assert result == "png"
assert "Unsupported image mimetype" in caplog.text
def test_content_block_to_bedrock_format_text():
text_block = TextBlock(text="Hello, world!")
result = _content_block_to_bedrock_format(text_block, MessageRole.USER)
assert result == {"text": "Hello, world!"}
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_user(mock_resolve):
mock_bytes = BytesIO(b"fake_image_data")
mock_bytes.read = MagicMock(return_value=b"fake_image_data")
mock_resolve.return_value = mock_bytes
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.USER)
assert "image" in result
assert result["image"]["format"] == "png"
assert "bytes" in result["image"]["source"]
mock_resolve.assert_called_once()
@patch("llama_index.core.base.llms.types.ImageBlock.resolve_image")
def test_content_block_to_bedrock_format_image_assistant(mock_resolve, caplog):
image_block = ImageBlock(image=b"", image_mimetype="image/png")
result = _content_block_to_bedrock_format(image_block, MessageRole.ASSISTANT)
assert result is None
assert "only supports image blocks for user messages" in caplog.text
mock_resolve.assert_not_called()
def test_content_block_to_bedrock_format_audio(caplog):
audio_block = AudioBlock(audio=b"test_audio")
result = _content_block_to_bedrock_format(audio_block, MessageRole.USER)
assert result is None
assert "Audio blocks are not supported" in caplog.text
def test_content_block_to_bedrock_format_unsupported(caplog):
unsupported_block = object()
result = _content_block_to_bedrock_format(unsupported_block, MessageRole.USER)
assert result is None
assert "Unsupported block type" in caplog.text
assert str(type(unsupported_block)) in caplog.text
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.dtypes import result_type as result_type
from keras.src.backend.common.global_state import clear_session as clear_session
from keras.src.backend.common.keras_tensor import (
is_keras_tensor as is_keras_tensor,
)
from keras.src.backend.common.variables import is_float_dtype as is_float_dtype
from keras.src.backend.common.variables import is_int_dtype as is_int_dtype
from keras.src.backend.common.variables import (
standardize_dtype as standardize_dtype,
)
from keras.src.backend.config import backend as backend
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.utils.naming import get_uid as get_uid
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.config import backend
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.utils.naming import get_uid
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
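# Usage sketch (hypothetical test, for illustration only): the context manager
# guarantees cleanup_repo runs even if the body of the `with` block fails.
#
#   def test_push(temporary_repo, hf_api, hf_token):
#       with temporary_repo(f"{CI_HUB_USER}/some-repo") as repo_id:
#           hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")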
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
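    # Preserve any pre-existing local token so it can be restored once the session ends.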
previous_token = HfFolder.get_token()
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id):
try:
yield repo_id
finally:
cleanup_repo(repo_id)
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
For each query, this script outputs the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
# (this requires: from sentence_transformers import util)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
For each query, this script outputs the top 5 most similar sentences in the corpus.
"""
from sentence_transformers import SentenceTransformer
import torch
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
# (this requires: from sentence_transformers import util)
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
try:
import sklearn
except ImportError:
sklearn = None
def _validate_data(estimator, *args, **kwargs):
"""Validate the input data.
wrapper for sklearn.utils.validation.validate_data or
BaseEstimator._validate_data depending on the scikit-learn version.
TODO: remove when minimum scikit-learn version is 1.6
"""
try:
# scikit-learn >= 1.6
from sklearn.utils.validation import validate_data
return validate_data(estimator, *args, **kwargs)
    except ImportError:
        # scikit-learn < 1.6: fall back to the private BaseEstimator method
        return estimator._validate_data(*args, **kwargs)
def type_of_target(y, input_name="", *, raise_unknown=False):
def _raise_or_return(target_type):
"""Depending on the value of raise_unknown, either raise an error or
return 'unknown'.
"""
if raise_unknown and target_type == "unknown":
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return target_type
from sklearn.utils.multiclass import type_of_target as sk_type_of_target
target_type = sk_type_of_target(y, input_name=input_name)
return _raise_or_return(target_type)
def _routing_enabled():
"""Return whether metadata routing is enabled.
Returns:
enabled : bool
Whether metadata routing is enabled. If the config is not set, it
defaults to False.
TODO: remove when the config key is no longer available in scikit-learn
"""
return sklearn.get_config().get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method):
"""Raise an error if metadata routing is not enabled and params are passed.
Parameters:
params : dict
The metadata passed to a method.
owner : object
The object to which the method belongs.
method : str
The name of the method, e.g. "fit".
Raises:
ValueError
If metadata routing is not enabled and params are passed.
"""
caller = (
f"{owner.__class__.__name__}.{method}"
if method
else owner.__class__.__name__
)
if not _routing_enabled() and params:
raise ValueError(
f"Passing extra keyword arguments to {caller} is only supported if"
" enable_metadata_routing=True, which you can set using"
" `sklearn.set_config`. See the User Guide"
" <https://scikit-learn.org/stable/metadata_routing.html> for more"
f" details. Extra parameters passed are: {set(params)}"
)
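# Illustrative sketch (assumes scikit-learn >= 1.3, where the config key exists):
# metadata routing is opt-in, so extra kwargs are rejected until it is enabled.
#
#   import sklearn
#   sklearn.set_config(enable_metadata_routing=True)
#   assert _routing_enabled()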
|
try:
import sklearn
except ImportError:
sklearn = None
def _validate_data(estimator, *args, **kwargs):
"""Validate the input data.
wrapper for sklearn.utils.validation.validate_data or
BaseEstimator._validate_data depending on the scikit-learn version.
TODO: remove when minimum scikit-learn version is 1.6
"""
try:
# scikit-learn >= 1.6
from sklearn.utils.validation import validate_data
return validate_data(estimator, *args, **kwargs)
    except ImportError:
        # scikit-learn < 1.6: fall back to the private BaseEstimator method
        return estimator._validate_data(*args, **kwargs)
def type_of_target(y, input_name="", *, raise_unknown=False):
def _raise_or_return(target_type):
"""Depending on the value of raise_unknown, either raise an error or
return 'unknown'.
"""
if raise_unknown and target_type == "unknown":
input = input_name if input_name else "data"
raise ValueError(f"Unknown label type for {input}: {y!r}")
else:
return target_type
target_type = sklearn.utils.multiclass.type_of_target(
y, input_name=input_name
)
return _raise_or_return(target_type)
def _routing_enabled():
"""Return whether metadata routing is enabled.
Returns:
enabled : bool
Whether metadata routing is enabled. If the config is not set, it
defaults to False.
TODO: remove when the config key is no longer available in scikit-learn
"""
return sklearn.get_config().get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method):
"""Raise an error if metadata routing is not enabled and params are passed.
Parameters:
params : dict
The metadata passed to a method.
owner : object
The object to which the method belongs.
method : str
The name of the method, e.g. "fit".
Raises:
ValueError
If metadata routing is not enabled and params are passed.
"""
caller = (
f"{owner.__class__.__name__}.{method}"
if method
else owner.__class__.__name__
)
if not _routing_enabled() and params:
raise ValueError(
f"Passing extra keyword arguments to {caller} is only supported if"
" enable_metadata_routing=True, which you can set using"
" `sklearn.set_config`. See the User Guide"
" <https://scikit-learn.org/stable/metadata_routing.html> for more"
f" details. Extra parameters passed are: {set(params)}"
)
|
from llama_index.core.instrumentation.events import BaseEvent
class ExceptionEvent(BaseEvent):
"""
ExceptionEvent.
Args:
exception (BaseException): exception.
"""
exception: BaseException
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ExceptionEvent"
|
from llama_index.core.instrumentation.events import BaseEvent
class ExceptionEvent(BaseEvent):
"""ExceptionEvent.
Args:
exception (BaseException): exception.
"""
exception: BaseException
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "ExceptionEvent"
|
from __future__ import annotations
from typing import Literal
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.1,
margin_strategy: Literal["absolute", "relative"] = "absolute",
margin: float = 0.0,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
temperature: Temperature parameter to scale the cosine similarities. Defaults to 0.1, adapted for Sparse embeddings.
Experimentation is recommended.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. :class:`SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("distilbert/distilbert-base-uncased")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(
model, guide=guide, temperature=temperature, margin_strategy=margin_strategy, margin=margin
)
|
from __future__ import annotations
from typing import Literal
from sentence_transformers.losses.GISTEmbedLoss import GISTEmbedLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseGISTEmbedLoss(GISTEmbedLoss):
def __init__(
self,
model: SparseEncoder,
guide: SparseEncoder,
temperature: float = 0.1,
margin_strategy: Literal["absolute", "relative"] = "absolute",
margin: float = 0.0,
) -> None:
"""
This loss is used to train a SparseEncoder model using the GISTEmbed algorithm.
It takes a model and a guide model as input, and uses the guide model to guide the
in-batch negative sample selection. The cosine similarity is used to compute the loss
and the temperature parameter is used to scale the cosine similarities.
You can apply different false-negative filtering strategies to discard hard negatives that are too similar to
the positive. Two strategies are supported:
- "absolute": Discards negatives whose similarity score is greater than or equal to ``positive_score - margin``.
- "relative": Discards negatives whose similarity score is greater than or equal to ``positive_score * (1 - margin)``.
Args:
model: SparseEncoder model based on a `transformers` model.
guide: SparseEncoder model to guide the in-batch negative sample selection.
            temperature: Temperature parameter to scale the cosine similarities. Defaults to 0.1, adapted for sparse
                embeddings; experimentation is recommended.
margin_strategy: Strategy used for false negative filtering. One of {"absolute", "relative"}.
margin: The margin value for filtering negatives. Defaults to 0.0, together with the "absolute" strategy,
this only removes negatives that are more similar to the query than the positive is to the query.
References:
- For further details, see: https://arxiv.org/abs/2402.16829
Requirements:
1. (anchor, positive, negative) triplets
2. (anchor, positive) pairs
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
| (anchor, positive) pairs | none |
+---------------------------------------+--------+
Recommendations:
- Use ``BatchSamplers.NO_DUPLICATES`` (:class:`docs <sentence_transformers.training_args.BatchSamplers>`) to
ensure that no in-batch negatives are duplicates of the anchor or positive samples.
Relations:
- :class:`SparseMultipleNegativesRankingLoss` is similar to this loss, but it does not use
a guide model to guide the in-batch negative sample selection. `SparseGISTEmbedLoss` yields
a stronger training signal at the cost of some training overhead.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
# Initialize the SPLADE model
model = SparseEncoder("distilbert/distilbert-base-uncased")
guide = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
}
)
loss = losses.SparseGISTEmbedLoss(model, guide=guide)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(
model, guide=guide, temperature=temperature, margin_strategy=margin_strategy, margin=margin
)
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import tv_tensors
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
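    # (For example, pickle.dumps(_get_defaultdict(0)) succeeds, whereas pickling a
    # defaultdict built from a lambda raises a pickling error.)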
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, tv_tensors.Image, tv_tensors.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [tv_tensors.Image, tv_tensors.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `tv_tensors.Image` or `tv_tensors.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `tv_tensors.Image` or `tv_tensors.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
import functools
import warnings
from collections import defaultdict
from typing import Any, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union
import torch
from torchvision import datapoints
from torchvision.transforms.v2 import Transform
from torchvision.transforms.v2._utils import is_pure_tensor
T = TypeVar("T")
def _default_arg(value: T) -> T:
return value
def _get_defaultdict(default: T) -> Dict[Any, T]:
# This weird looking construct only exists, since `lambda`'s cannot be serialized by pickle.
# If it were possible, we could replace this with `defaultdict(lambda: default)`
return defaultdict(functools.partial(_default_arg, default))
class PermuteDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Sequence[int], Dict[Type, Optional[Sequence[int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.permute(*dims)
class TransposeDimensions(Transform):
_transformed_types = (is_pure_tensor, datapoints.Image, datapoints.Video)
def __init__(self, dims: Union[Tuple[int, int], Dict[Type, Optional[Tuple[int, int]]]]) -> None:
super().__init__()
if not isinstance(dims, dict):
dims = _get_defaultdict(dims)
if torch.Tensor in dims and any(cls in dims for cls in [datapoints.Image, datapoints.Video]):
warnings.warn(
"Got `dims` values for `torch.Tensor` and either `datapoints.Image` or `datapoints.Video`. "
"Note that a plain `torch.Tensor` will *not* be transformed by this (or any other transformation) "
"in case a `datapoints.Image` or `datapoints.Video` is present in the input."
)
self.dims = dims
def _transform(self, inpt: Any, params: Dict[str, Any]) -> torch.Tensor:
dims = self.dims[type(inpt)]
if dims is None:
return inpt.as_subclass(torch.Tensor)
return inpt.transpose(*dims)
|
from langchain_core.tracers.langchain_v1 import LangChainTracerV1, get_headers
__all__ = ["LangChainTracerV1", "get_headers"]
|
from langchain_core.tracers.langchain_v1 import LangChainTracerV1, get_headers
__all__ = ["get_headers", "LangChainTracerV1"]
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import _extract_tar
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path: Path, folders: List[str], _ext_audio: str) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
Args:
path (Path): Root path to the dataset.
folders (List[str]): Folders that contain the desired audio files.
_ext_audio (str): Extension of audio files.
Returns:
List[Tuple[str, str]]:
List of tuples where the first element is the relative path to the audio file.
The format of relative path is like:
1h/[0-5]/[clean, other] or 9h/[clean, other]
The second element is the file name without audio extension.
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Subset of Libri-light :cite:`librilight` dataset,
which was used in HuBERT :cite:`hsu2021hubert` for supervised fine-tuning.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``"10min"``, ``"1h"``, ``"10h"``]
(Default: ``"10min"``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
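    Example (a minimal sketch; assumes the archive can be downloaded into ``root``):

        >>> dataset = LibriLightLimited("path/to/root", subset="10min", download=True)
        >>> waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = dataset[0]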
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
_extract_tar(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
int:
Speaker ID
int:
Chapter ID
int:
Utterance ID
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
|
import os
from pathlib import Path
from typing import List, Tuple, Union
import torchaudio
from torch import Tensor
from torch.hub import download_url_to_file
from torch.utils.data import Dataset
from torchaudio.datasets.librispeech import _get_librispeech_metadata
from torchaudio.datasets.utils import extract_archive
_ARCHIVE_NAME = "librispeech_finetuning"
_URL = "https://dl.fbaipublicfiles.com/librilight/data/librispeech_finetuning.tgz"
_CHECKSUM = "5d1efdc777b548194d7e09ba89126e2188026df9fd57aa57eb14408d2b2342af"
_SUBSET_MAP = {"10min": ["1h/0"], "1h": ["1h/*"], "10h": ["1h/*", "9h"]}
def _get_fileids_paths(path: Path, folders: List[str], _ext_audio: str) -> List[Tuple[str, str]]:
"""Get the file names and the corresponding file paths without `speaker_id`
and `chapter_id` directories.
The format of path is like:
{root}/{_ARCHIVE_NAME}/1h/[0-5]/[clean, other] or
{root}/{_ARCHIVE_NAME}/9h/[clean, other]
Args:
path (Path): Root path to the dataset.
folders (List[str]): Folders that contain the desired audio files.
_ext_audio (str): Extension of audio files.
Returns:
List[Tuple[str, str]]:
List of tuples where the first element is the relative path to the audio file.
The format of relative path is like:
1h/[0-5]/[clean, other] or 9h/[clean, other]
The second element is the file name without audio extension.
"""
path = Path(path)
files_paths = []
for folder in folders:
paths = [p.relative_to(path) for p in path.glob(f"{folder}/*/*/*/*{_ext_audio}")]
files_paths += [(str(p.parent.parent.parent), str(p.stem)) for p in paths] # get subset folder and file name
files_paths.sort(key=lambda x: x[0] + x[1])
return files_paths
class LibriLightLimited(Dataset):
"""Subset of Libri-light :cite:`librilight` dataset,
which was used in HuBERT :cite:`hsu2021hubert` for supervised fine-tuning.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
subset (str, optional): The subset to use. Options: [``"10min"``, ``"1h"``, ``"10h"``]
(Default: ``"10min"``).
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_txt = ".trans.txt"
_ext_audio = ".flac"
def __init__(
self,
root: Union[str, Path],
subset: str = "10min",
download: bool = False,
) -> None:
if subset not in _SUBSET_MAP:
raise ValueError(f"`subset` must be one of {_SUBSET_MAP.keys()}. Found: {subset}")
folders = _SUBSET_MAP[subset]
root = os.fspath(root)
self._path = os.path.join(root, _ARCHIVE_NAME)
archive = os.path.join(root, f"{_ARCHIVE_NAME}.tgz")
if not os.path.isdir(self._path):
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download")
if not os.path.isfile(archive):
download_url_to_file(_URL, archive, hash_prefix=_CHECKSUM)
extract_archive(archive)
self._fileids_paths = _get_fileids_paths(self._path, folders, self._ext_audio)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, int, int, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            Tuple of the following items:
Tensor:
Waveform
int:
Sample rate
str:
Transcript
int:
Speaker ID
int:
Chapter ID
int:
Utterance ID
"""
file_path, fileid = self._fileids_paths[n]
metadata = _get_librispeech_metadata(fileid, self._path, file_path, self._ext_audio, self._ext_txt)
waveform, _ = torchaudio.load(os.path.join(self._path, metadata[0]))
return (waveform,) + metadata[1:]
def __len__(self) -> int:
return len(self._fileids_paths)
|
__version__ = '0.32.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
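# A short usage sketch for the names re-exported above; the MyDoc schema is
# illustrative only.
if __name__ == '__main__':

    class MyDoc(BaseDoc):
        text: str

    docs = DocList[MyDoc]([MyDoc(text='hello'), MyDoc(text='world')])
    print([d.text for d in docs])  # -> ['hello', 'world']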
|
__version__ = '0.32.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import ImageDoc
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = ImageDoc(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(ImageDoc, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(ImageDoc, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(ImageDoc, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = ImageDoc(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: ImageDoc
image2: ImageDoc
image3: ImageDoc
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = ImageDoc(url=REMOTE_JPG)
img.bytes = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = ImageDoc(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes = img.tensor.to_bytes()
assert isinstance(img.bytes, bytes)
assert len(img.bytes) > 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Image
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
@pytest.mark.slow
@pytest.mark.internet
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
def test_image_str():
image = parse_obj_as(Image, 'http://myurl.jpg')
assert image.url == 'http://myurl.jpg'
def test_image_np():
image = parse_obj_as(Image, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_image_torch():
image = parse_obj_as(Image, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_image_tensorflow():
image = Image(tensor=tf.zeros((10, 10, 3)))
assert tnp.allclose(image.tensor.tensor, tf.zeros((10, 10, 3)))
def test_image_shortcut_doc():
class MyDoc(BaseDocument):
image: Image
image2: Image
image3: Image
doc = MyDoc(
image='http://myurl.jpg',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.jpg'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.slow
@pytest.mark.internet
def test_byte():
img = Image(url=REMOTE_JPG)
img.bytes = img.url.load_bytes()
@pytest.mark.slow
@pytest.mark.internet
def test_byte_from_tensor():
img = Image(url=REMOTE_JPG)
img.tensor = img.url.load()
img.bytes = img.tensor.to_bytes()
assert isinstance(img.bytes, bytes)
assert len(img.bytes) > 0
|
# TODO: Remove this config after benchmarking all related configs
_base_ = 'fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=4)
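# A sketch (comment only) of how this `_base_` inheritance resolves when the
# config is loaded, assuming the MMEngine Config API; the path is hypothetical.
#
#   from mmengine.config import Config
#   cfg = Config.fromfile('configs/fcos/this_config.py')  # hypothetical path
#   print(cfg.train_dataloader.batch_size)  # -> 4, overriding the _base_ value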
|
# TODO: Remove this config after benchmarking all related configs
_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=4)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
"""
This evaluator extends TranslationEvaluator but is specifically designed for sparse encoder models.
    Given two sets of sentences in different languages, e.g. (en_1, en_2, en_3, ...) and (fr_1, fr_2, fr_3, ...),
    where fr_i is assumed to be the translation of en_i,
    this evaluator checks whether vec(en_i) has the highest similarity to vec(fr_i),
    and computes the accuracy in both directions.
Args:
source_sentences (List[str]): List of sentences in the source language.
target_sentences (List[str]): List of sentences in the target language.
show_progress_bar (bool): Whether to show a progress bar when computing embeddings. Defaults to False.
batch_size (int): The batch size to compute sentence embeddings. Defaults to 16.
name (str): The name of the evaluator. Defaults to an empty string.
print_wrong_matches (bool): Whether to print incorrect matches. Defaults to False.
write_csv (bool): Whether to write the evaluation results to a CSV file. Defaults to True.
truncate_dim (int, optional): The dimension to truncate sentence embeddings to. If None, the model's
current truncation dimension will be used. Defaults to None.
Example:
::
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTranslationEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
        # Load a model (not multilingual yet; multilingual sparse models will hopefully appear on the Hub soon)
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a parallel sentences dataset
dataset = load_dataset("sentence-transformers/parallel-sentences-news-commentary", "en-nl", split="train[:1000]")
# Initialize the TranslationEvaluator using the same texts from two languages
translation_evaluator = SparseTranslationEvaluator(
source_sentences=dataset["english"],
target_sentences=dataset["non_english"],
name="news-commentary-en-nl",
)
results = translation_evaluator(model)
'''
Evaluating translation matching Accuracy of the model on the news-commentary-en-nl dataset:
Accuracy src2trg: 41.40
Accuracy trg2src: 47.70
'''
# Print the results
print(f"Primary metric: {translation_evaluator.primary_metric}")
# => Primary metric: news-commentary-en-nl_mean_accuracy
print(f"Primary metric value: {results[translation_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.4455
"""
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TranslationEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTranslationEvaluator(TranslationEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
show_progress_bar: bool = False,
batch_size: int = 16,
name: str = "",
print_wrong_matches: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences,
target_sentences,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
print_wrong_matches=print_wrong_matches,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
        self, model: SparseEncoder, output_path: str | None = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> list[Tensor]:
kwargs["truncate_dim"] = self.truncate_dim
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_tensor=False,
convert_to_sparse_tensor=True,
save_on_cpu=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmdet.registry import TRANSFORMS
@TRANSFORMS.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = TRANSFORMS.build(transform)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
str_ = t.__repr__()
if 'Compose(' in str_:
str_ = str_.replace('\n', '\n ')
format_string += '\n'
format_string += f' {str_}'
format_string += '\n)'
return format_string
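# A small self-contained sketch (not part of the module): register a trivial
# transform and compose it from a config dict plus a plain callable. The
# transform name is illustrative only.
if __name__ == '__main__':

    @TRANSFORMS.register_module()
    class AddFlag:
        """Toy transform that marks the results dict."""

        def __call__(self, data):
            data['flag'] = True
            return data

    pipeline = Compose([dict(type='AddFlag'), lambda data: data])
    print(pipeline({'img': None}))  # -> {'img': None, 'flag': True}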
|
# Copyright (c) OpenMMLab. All rights reserved.
import collections
from mmcv.utils import build_from_cfg
from ..builder import PIPELINES
@PIPELINES.register_module()
class Compose:
"""Compose multiple transforms sequentially.
Args:
        transforms (Sequence[dict | callable]): Sequence of transform objects or
            config dicts to be composed.
"""
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
"""Call function to apply transforms sequentially.
Args:
            data (dict): A result dict containing the data to transform.
Returns:
dict: Transformed data.
"""
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
str_ = t.__repr__()
if 'Compose(' in str_:
str_ = str_.replace('\n', '\n ')
format_string += '\n'
format_string += f' {str_}'
format_string += '\n)'
return format_string
|
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.tongyi import (
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls,
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_dict_to_message_partial_mode() -> None:
message_dict = {"role": "assistant", "content": "foo", "partial": True}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo", additional_kwargs={"partial": True})
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai_partial_mode() -> None:
message = AIMessage(content="foo", additional_kwargs={"partial": True})
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo", "partial": True}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = FunctionMessage(name="foo", content="bar")
result = convert_message_to_dict(message)
expected_output = {
"role": "tool",
"tool_call_id": "",
"content": "bar",
"name": "foo",
}
assert result == expected_output
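def test__convert_dict_to_message_round_trip() -> None:
    # Sketch of the invariant implied by the tests above: a plain role/content
    # dict should survive a dict -> message -> dict round trip unchanged.
    message_dict = {"role": "assistant", "content": "foo"}
    result = convert_message_to_dict(convert_dict_to_message(message_dict))
    assert result == message_dict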
|
from langchain_core.messages import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain_core.output_parsers.openai_tools import (
parse_tool_call,
)
from langchain_community.chat_models.tongyi import (
convert_dict_to_message,
convert_message_to_dict,
)
def test__convert_dict_to_message_human() -> None:
message_dict = {"role": "user", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = HumanMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_ai() -> None:
message_dict = {"role": "assistant", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_other_role() -> None:
message_dict = {"role": "system", "content": "foo"}
result = convert_dict_to_message(message_dict)
expected_output = SystemMessage(content="foo")
assert result == expected_output
def test__convert_dict_to_message_function_call() -> None:
raw_function_calls = [
{
"function": {
"name": "get_current_weather",
"arguments": '{"location": "Boston", "unit": "fahrenheit"}',
},
"type": "function",
}
]
message_dict = {
"role": "assistant",
"content": "foo",
"tool_calls": raw_function_calls,
}
result = convert_dict_to_message(message_dict)
tool_calls = [
parse_tool_call(raw_tool_call, return_id=True)
for raw_tool_call in raw_function_calls
]
expected_output = AIMessage(
content="foo",
additional_kwargs={"tool_calls": raw_function_calls},
tool_calls=tool_calls, # type: ignore[arg-type]
invalid_tool_calls=[],
)
assert result == expected_output
def test__convert_dict_to_message_partial_mode() -> None:
message_dict = {"role": "assistant", "content": "foo", "partial": True}
result = convert_dict_to_message(message_dict)
expected_output = AIMessage(content="foo", additional_kwargs={"partial": True})
assert result == expected_output
def test__convert_message_to_dict_human() -> None:
message = HumanMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "user", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai() -> None:
message = AIMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_ai_partial_mode() -> None:
message = AIMessage(content="foo", additional_kwargs={"partial": True})
result = convert_message_to_dict(message)
expected_output = {"role": "assistant", "content": "foo", "partial": True}
assert result == expected_output
def test__convert_message_to_dict_system() -> None:
message = SystemMessage(content="foo")
result = convert_message_to_dict(message)
expected_output = {"role": "system", "content": "foo"}
assert result == expected_output
def test__convert_message_to_dict_tool() -> None:
message = FunctionMessage(name="foo", content="bar")
result = convert_message_to_dict(message)
expected_output = {
"role": "tool",
"tool_call_id": "",
"content": "bar",
"name": "foo",
}
assert result == expected_output
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json_doclist():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocList[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize('tensor_type', [TorchTensor, NdArray])
def test_from_to_json_docvec(tensor_type):
def generate_docs(tensor_type):
class InnerDoc(BaseDoc):
tens: tensor_type
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: tensor_type
tens_none: Optional[tensor_type]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
def _rand_vec_gen(tensor_type):
arr = np.random.rand(5)
if tensor_type == TorchTensor:
arr = torch.from_numpy(arr).to(torch.float32)
return arr
inner = InnerDoc(tens=_rand_vec_gen(tensor_type))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=tensor_type)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=_rand_vec_gen(tensor_type),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=tensor_type,
)
return vec
v = generate_docs(tensor_type)
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=tensor_type)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
@pytest.mark.tensorflow
def test_from_to_json_docvec_tf():
from docarray.typing import TensorFlowTensor
def generate_docs():
class InnerDoc(BaseDoc):
tens: TensorFlowTensor
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: TensorFlowTensor
tens_none: Optional[TensorFlowTensor]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
inner = InnerDoc(tens=np.random.rand(5))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=TensorFlowTensor)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=np.random.rand(5),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=TensorFlowTensor,
)
return vec
v = generate_docs()
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=TensorFlowTensor)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
def test_union_type():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
docs_copy = docs.from_json(docs.to_json())
assert docs == docs_copy
@pytest.mark.parametrize('tensor_type', [NdArray, TorchTensor])
def test_from_to_json_tensor_type(tensor_type):
da = DocVec[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
],
tensor_type=tensor_type,
)
json_da = da.to_json()
da2 = DocVec[MyDoc].from_json(json_da, tensor_type=tensor_type)
assert da2.tensor_type == tensor_type
assert isinstance(da2.embedding, tensor_type)
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList, DocVec
from docarray.documents import ImageDoc
from docarray.typing import NdArray, TorchTensor
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
def test_from_to_json_doclist():
da = DocList[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
json_da = da.to_json()
da2 = DocList[MyDoc].from_json(json_da)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.parametrize('tensor_type', [TorchTensor, NdArray])
def test_from_to_json_docvec(tensor_type):
def generate_docs(tensor_type):
class InnerDoc(BaseDoc):
tens: tensor_type
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: tensor_type
tens_none: Optional[tensor_type]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
def _rand_vec_gen(tensor_type):
arr = np.random.rand(5)
if tensor_type == TorchTensor:
arr = torch.from_numpy(arr).to(torch.float32)
return arr
inner = InnerDoc(tens=_rand_vec_gen(tensor_type))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=tensor_type)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=_rand_vec_gen(tensor_type),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=tensor_type,
)
return vec
v = generate_docs(tensor_type)
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=tensor_type)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
@pytest.mark.tensorflow
def test_from_to_json_docvec_tf():
from docarray.typing import TensorFlowTensor
def generate_docs():
class InnerDoc(BaseDoc):
tens: TensorFlowTensor
class MyDoc(BaseDoc):
text: str
num: Optional[int]
tens: TensorFlowTensor
tens_none: Optional[TensorFlowTensor]
inner: InnerDoc
inner_none: Optional[InnerDoc]
inner_vec: DocVec[InnerDoc]
inner_vec_none: Optional[DocVec[InnerDoc]]
inner = InnerDoc(tens=np.random.rand(5))
inner_vec = DocVec[InnerDoc]([inner, inner], tensor_type=TensorFlowTensor)
vec = DocVec[MyDoc](
[
MyDoc(
text=str(i),
num=None,
tens=np.random.rand(5),
inner=inner,
inner_none=None,
inner_vec=inner_vec,
inner_vec_none=None,
)
for i in range(5)
],
tensor_type=TensorFlowTensor,
)
return vec
v = generate_docs()
bytes_ = v.to_json()
v_after = DocVec[v.doc_type].from_json(bytes_, tensor_type=TensorFlowTensor)
assert v_after.tensor_type == v.tensor_type
assert set(v_after._storage.columns.keys()) == set(v._storage.columns.keys())
assert v_after._storage == v._storage
def test_union_type():
from typing import Union
from docarray.documents import TextDoc
class CustomDoc(BaseDoc):
ud: Union[TextDoc, ImageDoc] = TextDoc(text='union type')
docs = DocList[CustomDoc]([CustomDoc(ud=TextDoc(text='union type'))])
docs_copy = docs.from_json(docs.to_json())
assert docs == docs_copy
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import List, Optional
import fire
from llama import Llama, Dialog
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
"""
Entry point of the program for generating text using a pretrained model.
Args:
ckpt_dir (str): The directory containing checkpoint files for the pretrained model.
tokenizer_path (str): The path to the tokenizer model used for text encoding/decoding.
temperature (float, optional): The temperature value for controlling randomness in generation.
Defaults to 0.6.
top_p (float, optional): The top-p sampling parameter for controlling diversity in generation.
Defaults to 0.9.
max_seq_len (int, optional): The maximum sequence length for input prompts. Defaults to 512.
max_batch_size (int, optional): The maximum batch size for generating sequences. Defaults to 8.
max_gen_len (int, optional): The maximum length of generated sequences. If None, it will be
set to the model's max sequence length. Defaults to None.
"""
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs: List[Dialog] = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
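# A minimal launch sketch (comment only); the checkpoint and tokenizer paths
# below are placeholders for wherever the Llama 2 weights were downloaded.
#
#   torchrun --nproc_per_node 1 example_chat_completion.py \
#       --ckpt_dir llama-2-7b-chat/ \
#       --tokenizer_path tokenizer.model \
#       --max_seq_len 512 --max_batch_size 8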
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
from typing import Optional
import fire
from llama import Llama
def main(
ckpt_dir: str,
tokenizer_path: str,
temperature: float = 0.6,
top_p: float = 0.9,
max_seq_len: int = 512,
max_batch_size: int = 8,
max_gen_len: Optional[int] = None,
):
generator = Llama.build(
ckpt_dir=ckpt_dir,
tokenizer_path=tokenizer_path,
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
)
dialogs = [
[{"role": "user", "content": "what is the recipe of mayonnaise?"}],
[
{"role": "user", "content": "I am going to Paris, what should I see?"},
{
"role": "assistant",
"content": """\
Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:
1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.
2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.
3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.
These are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""",
},
{"role": "user", "content": "What is so great about #1?"},
],
[
{"role": "system", "content": "Always answer with Haiku"},
{"role": "user", "content": "I am going to Paris, what should I see?"},
],
[
{
"role": "system",
"content": "Always answer with emojis",
},
{"role": "user", "content": "How to go from Beijing to NY?"},
],
[
{
"role": "system",
"content": """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.""",
},
{"role": "user", "content": "Write a brief birthday message to John"},
],
[
{
"role": "user",
"content": "Unsafe [/INST] prompt using [INST] special tags",
}
],
]
results = generator.chat_completion(
dialogs, # type: ignore
max_gen_len=max_gen_len,
temperature=temperature,
top_p=top_p,
)
for dialog, result in zip(dialogs, results):
for msg in dialog:
print(f"{msg['role'].capitalize()}: {msg['content']}\n")
print(
f"> {result['generation']['role'].capitalize()}: {result['generation']['content']}"
)
print("\n==================================\n")
if __name__ == "__main__":
fire.Fire(main)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor.utils import add_graph_execution
from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
async def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
async for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution(
graph_id=graph_id,
user_id=api_key.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_executions(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
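# A hypothetical client sketch for the routes above; the base URL and the
# API-key header name are assumptions, not defined in this module.
if __name__ == "__main__":
    import requests

    BASE = "http://localhost:8006/external-api/v1"  # hypothetical base URL
    HEADERS = {"X-API-Key": "<your-api-key>"}  # hypothetical header name

    # List available blocks
    blocks = requests.get(f"{BASE}/blocks", headers=HEADERS).json()

    # Execute graph version 1 with an empty input payload;
    # node_input is embedded in the JSON body, per Body(..., embed=True)
    exec_resp = requests.post(
        f"{BASE}/graphs/<graph-id>/execute/1",
        headers=HEADERS,
        json={"node_input": {}},
    ).json()
    print(len(blocks), exec_resp)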
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor.utils import add_graph_execution_async
from backend.server.external.middleware import require_permission
from backend.util.settings import Settings
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
async def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = await add_graph_execution_async(
graph_id=graph_id,
user_id=api_key.user_id,
inputs=node_input,
graph_version=graph_version,
)
return {"id": graph_exec.id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_executions(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.26.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, where the Ubuntu
    default of ``ulimit -n 1024`` (or 256 on OS X El Capitan) is too low; the
    setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
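# A minimal end-to-end sketch using the names re-exported above: run a local
# Flow with a toy Executor and post a single Document through it.
if __name__ == '__main__':

    class AppendExecutor(Executor):
        @requests
        def append(self, docs: DocumentArray, **kwargs):
            for doc in docs:
                doc.text += ' world'

    f = Flow().add(uses=AppendExecutor)
    with f:
        result = f.post(on='/', inputs=Document(text='hello'))
        print(result[0].text)  # -> 'hello world'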
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# Attempt to fix the fork error on macOS; it seems to have no effect, so the
# variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.25.3'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096. This is useful when running
    matplotlib/seaborn with many plot generators in parallel, where the Ubuntu
    default of ``ulimit -n 1024`` (or 256 on OS X El Capitan) is too low; the
    setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which losses to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
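# A minimal usage sketch mirroring the evaluator docstrings in this package;
# the checkpoint name follows their example, and `convert_to_sparse_tensor`
# matches the encode() kwarg the evaluators pass through.
if __name__ == "__main__":
    model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
    embeddings = model.encode(
        ["an example sentence"], convert_to_sparse_tensor=True
    )
    print(embeddings)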
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Ask Tom for an update on which losses to implement
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils import collect_env as collect_base_env
from mmengine.utils import get_git_hash
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
# optimizer
optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py', '../common/mstrain_3x_coco.py'
]
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
from typing import Optional, Dict, List, Set, Tuple
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_integer_field():
class Meow(BaseDocument):
age: int
wealth: float
registered: bool
d = Meow(age=30, wealth=100.5, registered=True)
rebuilt_doc = Meow.from_protobuf(d.to_protobuf())
assert rebuilt_doc.age == 30
assert rebuilt_doc.wealth == 100.5
assert rebuilt_doc.registered
@pytest.mark.proto
def test_list_set_dict_tuple_field():
class MyDoc(BaseDocument):
list_: List
dict_: Dict
tuple_: Tuple
set_: Set
d = MyDoc(
list_=[0, 1, 2], dict_={'a': 0, 'b': 1}, tuple_=tuple([0, 1]), set_={0, 1}
)
rebuilt_doc = MyDoc.from_protobuf(d.to_protobuf())
assert rebuilt_doc.list_ == [0, 1, 2]
assert rebuilt_doc.dict_ == {'a': 0, 'b': 1}
assert rebuilt_doc.tuple_ == (0, 1)
assert rebuilt_doc.set_ == {0, 1}
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import DocumentArray
from docarray.base_document import BaseDocument
from docarray.typing import NdArray, TorchTensor
@pytest.mark.proto
def test_proto_simple():
class CustomDoc(BaseDocument):
text: str
doc = CustomDoc(text='hello')
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_ndarray():
class CustomDoc(BaseDocument):
tensor: NdArray
tensor = np.zeros((3, 224, 224))
doc = CustomDoc(tensor=tensor)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
assert (new_doc.tensor == tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(text='hello', inner=CustomInnerDoc(tensor=np.zeros((3, 224, 224))))
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc():
class CustomInnerDoc(BaseDocument):
tensor: NdArray
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=np.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_proto_with_nested_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
inner: CustomInnerDoc
doc = CustomDoc(
text='hello', inner=CustomInnerDoc(tensor=torch.zeros((3, 224, 224)))
)
CustomDoc.from_protobuf(doc.to_protobuf())
@pytest.mark.proto
def test_proto_with_chunks_doc_pytorch():
class CustomInnerDoc(BaseDocument):
tensor: TorchTensor
class CustomDoc(BaseDocument):
text: str
chunks: DocumentArray[CustomInnerDoc]
doc = CustomDoc(
text='hello',
chunks=DocumentArray[CustomInnerDoc](
[CustomInnerDoc(tensor=torch.zeros((3, 224, 224))) for _ in range(5)],
),
)
new_doc = CustomDoc.from_protobuf(doc.to_protobuf())
for chunk1, chunk2 in zip(doc.chunks, new_doc.chunks):
assert (chunk1.tensor == chunk2.tensor).all()
@pytest.mark.proto
def test_optional_field_in_doc():
class CustomDoc(BaseDocument):
text: Optional[str]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
@pytest.mark.proto
def test_optional_field_nested_in_doc():
class InnerDoc(BaseDocument):
title: str
class CustomDoc(BaseDocument):
text: Optional[InnerDoc]
CustomDoc.from_protobuf(CustomDoc().to_protobuf())
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import version
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras._tf_keras.keras import backend
from keras._tf_keras.keras import layers
from keras._tf_keras.keras import losses
from keras._tf_keras.keras import metrics
from keras._tf_keras.keras import preprocessing
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
import threading
import fsspec.asyn
import torch
from ...iterable_dataset import IterableDataset, _apply_feature_types
from ...utils.logging import get_logger
logger = get_logger(__name__)
def _set_fsspec_for_multiprocess() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
if hasattr(fsspec.asyn, "reset_lock"):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
class TorchIterableDataset(IterableDataset, torch.utils.data.IterableDataset):
def __iter__(self):
        # fix for fsspec when using multiprocessing
_set_fsspec_for_multiprocess()
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
yield from IterableDataset.__iter__(self)
else: # in a worker process
# check if there aren't too many workers
if worker_info.id == 0 and self.n_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={self.n_shards}). "
f"Stopping dataloader workers [{self.n_shards}...{worker_info.num_workers -1}]."
)
logger.warning(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={self.n_shards}."
f"To enable more parallelism, please split the dataset in more files than {self.n_shards}."
)
# split workload
shards_indices = list(range(worker_info.id, self.n_shards, worker_info.num_workers))
if shards_indices:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{self.n_shards} shards."
)
for shard_idx in shards_indices:
for key, example in self._iter_shard(shard_idx):
if self.features:
yield _apply_feature_types(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
else:
yield example
logger.debug(
f"dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{self.n_shards} shards."
)
else:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({self.n_shards}<{worker_info.num_workers})."
)
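# Hedged usage sketch (not part of the original module): each DataLoader
# worker runs __iter__ above and is assigned a disjoint subset of shards, so
# more workers than dataset.n_shards adds no parallelism. The dataset name and
# the load_dataset call are illustrative assumptions.
if __name__ == "__main__":
    from datasets import load_dataset
    from torch.utils.data import DataLoader

    # streaming=True yields an IterableDataset; with_format("torch") makes it
    # usable with a torch DataLoader (i.e. the class defined above)
    ds = load_dataset("c4", "en", split="train", streaming=True).with_format("torch")
    loader = DataLoader(ds, batch_size=8, num_workers=2)
    for batch in loader:
        print(batch.keys())
        break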
|
import fsspec.asyn
import torch
from ...iterable_dataset import IterableDataset, _apply_feature_types
from ...utils.logging import get_logger
logger = get_logger(__name__)
def _set_fsspec_for_multiprocess() -> None:
"""
Clear reference to the loop and thread.
    This is necessary; otherwise HTTPFileSystem hangs in the ML training loop.
Only required for fsspec >= 0.9.0
See https://github.com/fsspec/gcsfs/issues/379
"""
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
class TorchIterableDataset(IterableDataset, torch.utils.data.IterableDataset):
def __iter__(self):
        # fix for fsspec when using multiprocessing
_set_fsspec_for_multiprocess()
worker_info = torch.utils.data.get_worker_info()
if worker_info is None: # single-process data loading, return the full iterator
yield from IterableDataset.__iter__(self)
else: # in a worker process
# check if there aren't too many workers
if worker_info.id == 0 and self.n_shards < worker_info.num_workers:
logger.warning(
f"Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={self.n_shards}). "
f"Stopping dataloader workers [{self.n_shards}...{worker_info.num_workers -1}]."
)
logger.warning(
f"To parallelize data loading, we give each process some shards (or data sources) to process. "
f"Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={self.n_shards}."
f"To enable more parallelism, please split the dataset in more files than {self.n_shards}."
)
# split workload
shards_indices = list(range(worker_info.id, self.n_shards, worker_info.num_workers))
if shards_indices:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{self.n_shards} shards."
)
for shard_idx in shards_indices:
for key, example in self._iter_shard(shard_idx):
if self.features:
yield _apply_feature_types(
example, self.features, token_per_repo_id=self._token_per_repo_id
)
else:
yield example
logger.debug(
f"dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{self.n_shards} shards."
)
else:
logger.debug(
f"dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({self.n_shards}<{worker_info.num_workers})."
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
'download_model:True',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
import numpy as np
import pytest
from executor.audioclip_image import AudioCLIPImageEncoder
from jina import Document, DocumentArray, Flow
@pytest.mark.parametrize("request_size", [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[
Document(blob=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(50)
]
)
with Flow(return_results=True).add(uses=AudioCLIPImageEncoder) as flow:
resp = flow.post(
on="/index",
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (1024,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.25"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
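# Hedged usage sketch of the public names exported above (the standard
# quickstart pattern; the ./data directory and the query string are
# illustrative).
if __name__ == "__main__":
    documents = SimpleDirectoryReader("./data").load_data()
    index = VectorStoreIndex.from_documents(documents)
    query_engine = index.as_query_engine()
    print(query_engine.query("What does the corpus say?"))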
|
"""Init file of LlamaIndex."""
__version__ = "0.12.24.post1"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see the LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = (
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
)
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
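# Hedged sketch of a custom LLM as described in the module docstring above
# (illustrative only; `_llm_type` and `_call` are the standard minimal
# overrides for `LLM`, and `EchoLLM` is a hypothetical name).
if __name__ == "__main__":
    from typing import Any, Optional

    from langchain_core.language_models.llms import LLM

    class EchoLLM(LLM):
        """Toy model that echoes the prompt back."""

        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(self, prompt: str, stop: Optional[list[str]] = None, **kwargs: Any) -> str:
            # a real implementation would call a model endpoint here
            return prompt

    print(EchoLLM().invoke("hello"))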
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see the LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = [
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
]
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
import datetime
from typing import Any
import prisma.models
import pydantic
import backend.data.block as block_model
import backend.data.graph as graph_model
import backend.server.model as server_model
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
agent_id: str
    agent_version: int
preset_id: str | None
updated_at: datetime.datetime
name: str
description: str
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, Any] # Should be BlockIOObjectSubSchema in frontend
is_favorite: bool
is_created_by_user: bool
is_latest_version: bool
@staticmethod
def from_db(agent: prisma.models.LibraryAgent):
if not agent.Agent:
raise ValueError("AgentGraph is required")
graph = graph_model.GraphModel.from_db(agent.Agent)
agent_updated_at = agent.Agent.updatedAt
lib_agent_updated_at = agent.updatedAt
        # Use the more recent of the graph's and the library agent's updated_at timestamps
updated_at = (
max(agent_updated_at, lib_agent_updated_at)
if agent_updated_at
else lib_agent_updated_at
)
return LibraryAgent(
id=agent.id,
agent_id=agent.agentId,
agent_version=agent.agentVersion,
updated_at=updated_at,
name=graph.name,
description=graph.description,
input_schema=graph.input_schema,
output_schema=graph.output_schema,
is_favorite=agent.isFavorite,
is_created_by_user=agent.isCreatedByUser,
is_latest_version=graph.is_active,
preset_id=agent.AgentPreset.id if agent.AgentPreset else None,
)
class LibraryAgentPreset(pydantic.BaseModel):
id: str
updated_at: datetime.datetime
agent_id: str
agent_version: int
name: str
description: str
is_active: bool
inputs: block_model.BlockInput
@staticmethod
def from_db(preset: prisma.models.AgentPreset):
input_data: block_model.BlockInput = {}
for preset_input in preset.InputPresets or []:
input_data[preset_input.name] = preset_input.data
return LibraryAgentPreset(
id=preset.id,
updated_at=preset.updatedAt,
agent_id=preset.agentId,
agent_version=preset.agentVersion,
name=preset.name,
description=preset.description,
is_active=preset.isActive,
inputs=input_data,
)
class LibraryAgentPresetResponse(pydantic.BaseModel):
presets: list[LibraryAgentPreset]
pagination: server_model.Pagination
class CreateLibraryAgentPresetRequest(pydantic.BaseModel):
name: str
description: str
inputs: block_model.BlockInput
agent_id: str
agent_version: int
is_active: bool
|
import typing
import pydantic
class LibraryAgent(pydantic.BaseModel):
id: str # Changed from agent_id to match GraphMeta
version: int # Changed from agent_version to match GraphMeta
is_active: bool # Added to match GraphMeta
name: str
description: str
isCreatedByUser: bool
# Made input_schema and output_schema match GraphMeta's type
input_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
output_schema: dict[str, typing.Any] # Should be BlockIOObjectSubSchema in frontend
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.34.1'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    many parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS, though it seems to have no effect; the variable must still be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
        _warnings.warn('multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually; it is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.34.0'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
    _warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
    Set the nofile soft limit to at least 4096, useful for running matplotlib/seaborn with
    many parallel plot generators against the Ubuntu default of `ulimit -n 1024` or the
    OS X El Capitan default of 256; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor, Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.2.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.6", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
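# Hedged worked example (assumed CI invocation): `python _min_dependencies.py numpy`
# prints "1.19.5". Likewise, after the loop above, tag_to_packages["build"]
# holds entries such as "numpy>=1.19.5", "scipy>=1.6.0", "cython>=3.0.10" and
# "meson-python>=0.16.0".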
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.19.5"
SCIPY_MIN_VERSION = "1.6.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# It will NOT be included in setup's extras_require
# The values are (version_spec, comma separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.3.4", "benchmark, docs, examples, tests"),
"scikit-image": ("0.17.2", "docs, examples, tests"),
"pandas": ("1.1.5", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.5.1", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.9", "tests"),
"pyamg": ("4.0.0", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("7.1.2", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.6", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TFIDFTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (
1,
_EMBEDDING_DIM,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...tfidf_text_executor import TFIDFTextEncoder
_EMBEDDING_DIM = 130107
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=TFIDFTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (
1,
_EMBEDDING_DIM,
)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--restart',
action='store_true',
default=False,
help='If set, the Flow will restart while blocked if the YAML configuration source is changed.'
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy for handling inspect deployments in the flow.
    If `REMOVE` is given, all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
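# Hedged usage sketch (not part of the original module): building the flow
# parser and parsing illustrative CLI arguments.
if __name__ == '__main__':
    parser = set_flow_parser()
    args = parser.parse_args(['--uses', 'flow.yml'])
    print(args.uses, args.inspect)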
|
"""Argparser module for Flow"""
from jina.parsers.base import set_base_parser
from jina.parsers.helper import KVAppendAction, add_arg_group
from jina.parsers.orchestrate.base import mixin_essential_parser
def mixin_flow_features_parser(parser):
"""Add the arguments for the Flow features to the parser
    :param parser: the parser to configure
"""
from jina.enums import FlowInspectType
gp = add_arg_group(parser, title='Flow Feature')
gp.add_argument(
'--uses',
type=str,
help='The YAML path represents a flow. It can be either a local file path or a URL.',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
gp.add_argument(
'--inspect',
type=FlowInspectType.from_string,
choices=list(FlowInspectType),
default=FlowInspectType.COLLECT,
help='''
    The strategy for handling inspect deployments in the flow.
    If `REMOVE` is given, all inspect deployments are removed when building the flow.
''',
)
def set_flow_parser(parser=None):
"""Set the parser for the flow
:param parser: an (optional) initial parser to build upon
:return: the parser
"""
if not parser:
parser = set_base_parser()
mixin_essential_parser(parser)
mixin_flow_features_parser(parser)
return parser
|
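`set_flow_parser()` above returns a standard `argparse.ArgumentParser`, so the flags added by the mixins can be exercised directly. A minimal usage sketch, assuming the module lives at `jina.parsers.flow` as in the Jina source tree (the `--env` value format is inferred from the `KEY: VALUE` metavar):

from jina.parsers.flow import set_flow_parser  # assumed import path

parser = set_flow_parser()
args = parser.parse_args(['--uses', 'flow.yml', '--env', 'LOG_LEVEL: DEBUG'])
print(args.uses)     # 'flow.yml'
print(args.env)      # parsed by KVAppendAction into a dict
print(args.inspect)  # FlowInspectType.COLLECT, the default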
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
myst_enable_extensions = ['colon_fence']
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
project = 'mmengine'
copyright = '2022, mmengine contributors'
author = 'mmengine contributors'
version_file = '../../mmengine/version.py'
with open(version_file) as f:
exec(compile(f.read(), version_file, 'exec'))
__version__ = locals()['__version__']
# The short X.Y version
version = __version__
# The full version, including alpha/beta/rc tags
release = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.autosectionlabel',
'myst_parser',
'sphinx_copybutton',
'sphinx.ext.autodoc.typehints',
] # yapf: disable
autodoc_typehints = 'description'
myst_heading_anchors = 4
# Configuration for intersphinx
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/doc/stable', None),
'torch': ('https://pytorch.org/docs/stable/', None),
'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
'menu': [
{
'name': 'GitHub',
'url': 'https://github.com/open-mmlab/mmengine'
},
],
# Specify the language of shared menu
'menu_lang': 'en',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']
# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
|
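Both conf.py variants read `__version__` by exec'ing `version.py` rather than importing the package, which keeps the docs build independent of the package's runtime dependencies. The idiom in isolation, assuming a version.py that only assigns `__version__`:

version_file = 'version.py'  # hypothetical path, containing: __version__ = '0.1.0'
with open(version_file) as f:
    exec(compile(f.read(), version_file, 'exec'))
print(locals()['__version__'])  # '0.1.0', obtained without importing the package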
# Owner(s): ["module: dynamo"]
"""
PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_adam in OptimizerTests)
"""
import functools
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from torch.nn import Parameter
class MyOptimizer(torch.optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
def _init_group(self, params, group):
any_complex = False
for p in group["params"]:
params.append(p)
any_complex |= p.is_complex()
return any_complex
def step(self):
for group in self.param_groups:
params = []
any_complex = self._init_group(params, group)
if any_complex:
params[0] -= 1
else:
params[0] += 1
class End2EndTests(torch._dynamo.test_case.TestCase):
# https://github.com/pytorch/torchdynamo/issues/1604
def test_optimizing_over_tensor_with_requires_grad(self):
class Net(torch.nn.Module):
def forward(self, x, y):
z = torch.bmm(x, y)
z = torch.flatten(z, 1)
return z
def training_iter_fn(batch, model, optimizer):
optimizer.zero_grad()
out = model(**batch)
target = torch.tensor([0, 7])
loss = torch.nn.CrossEntropyLoss()(out, target)
loss.backward()
optimizer.step()
return loss
net = Net()
input1 = torch.randn(2, 1, 4)
input2 = torch.randn(2, 4, 8, requires_grad=True)
optimizer = torch.optim.Adam([input2], lr=0.1)
cnts = torch._dynamo.testing.CompileCounter()
opt_training_iter_fn = torch.compile(training_iter_fn, backend=cnts)
batch = {"x": input1, "y": input2}
for _ in range(2):
opt_training_iter_fn(batch, net, optimizer)
self.assertEqual(cnts.frame_count, 2)
def test_state_dict(self):
@torch.compile(backend="eager")
def _test_state_dict(weight, bias, input):
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = torch.optim.Adagrad([weight, bias])
fn = functools.partial(fn_base, optimizer, weight, bias)
return optimizer, fn
optimizer, fn = _test_state_dict(
Parameter(torch.randn(10, 5)),
Parameter(torch.randn(10)),
torch.randn(5, requires_grad=True),
)
optimizer.step(fn)
def test_init_group(self):
for dtype in [torch.float32, torch.cfloat]:
tensor = torch.randn(5, 5, dtype=dtype)
params = Parameter(tensor.detach().clone(), requires_grad=False)
opt_params = Parameter(tensor.detach().clone(), requires_grad=False)
optim = MyOptimizer([params])
optim.step()
opt_optim = MyOptimizer([opt_params])
opt_step = torch.compile(backend="eager", fullgraph=True)(opt_optim.step)
opt_step()
self.assertEqual(params, opt_params)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
# Owner(s): ["module: dynamo"]
"""
PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_adam in OptimizerTests)
"""
import functools
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from torch.nn import Parameter
class MyOptimizer(torch.optim.Optimizer):
def __init__(self, params):
super().__init__(params, {})
def _init_group(self, params, group):
any_complex = False
for p in group["params"]:
params.append(p)
any_complex |= p.is_complex()
return any_complex
def step(self):
for group in self.param_groups:
params = []
any_complex = self._init_group(params, group)
if any_complex:
params[0] -= 1
else:
params[0] += 1
class End2EndTests(torch._dynamo.test_case.TestCase):
# https://github.com/pytorch/torchdynamo/issues/1604
def test_optimizing_over_tensor_with_requires_grad(self):
class Net(torch.nn.Module):
def forward(self, x, y):
z = torch.bmm(x, y)
z = torch.flatten(z, 1)
return z
def training_iter_fn(batch, model, optimizer):
optimizer.zero_grad()
out = model(**batch)
target = torch.tensor([0, 7])
loss = torch.nn.CrossEntropyLoss()(out, target)
loss.backward()
optimizer.step()
return loss
net = Net()
input1 = torch.randn(2, 1, 4)
input2 = torch.randn(2, 4, 8, requires_grad=True)
optimizer = torch.optim.Adam([input2], lr=0.1)
cnts = torch._dynamo.testing.CompileCounter()
opt_training_iter_fn = torch.compile(training_iter_fn, backend=cnts)
batch = {"x": input1, "y": input2}
for _ in range(2):
opt_training_iter_fn(batch, net, optimizer)
self.assertEqual(cnts.frame_count, 2)
def test_state_dict(self):
@torch.compile(backend="eager")
def _test_state_dict(weight, bias, input):
def fn_base(optimizer, weight, bias):
optimizer.zero_grad()
i = input
loss = (weight.mv(i) + bias).pow(2).sum()
loss.backward()
return loss
optimizer = torch.optim.Adagrad([weight, bias])
fn = functools.partial(fn_base, optimizer, weight, bias)
return optimizer, fn
optimizer, fn = _test_state_dict(
Parameter(torch.randn(10, 5)),
Parameter(torch.randn(10)),
torch.randn(5, requires_grad=True),
)
optimizer.step(fn)
def test_init_group(self):
for dtype in [torch.float32, torch.cfloat]:
tensor = torch.randn(5, 5, dtype=dtype)
params = Parameter(tensor.detach().clone(), requires_grad=False)
opt_params = Parameter(tensor.detach().clone(), requires_grad=False)
optim = MyOptimizer([params])
optim.step()
opt_optim = MyOptimizer([opt_params])
opt_step = torch.compile(backend="eager", fullgraph=True)(opt_optim.step)
opt_step()
self.assertEqual(params, opt_params)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
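`CompileCounter` in the tests above is the dynamo testing backend that records how many frames get compiled; `frame_count` is exactly what `assertEqual(cnts.frame_count, 2)` inspects. A minimal sketch of the counter on its own:

import torch
import torch._dynamo.testing

cnts = torch._dynamo.testing.CompileCounter()

@torch.compile(backend=cnts)
def f(x):
    return x.sin() + 1

f(torch.randn(3))
f(torch.randn(3))        # same input shape: the compiled graph is reused
print(cnts.frame_count)  # 1, since only one frame was compiled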
# Copyright (c) OpenMMLab. All rights reserved.
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
|
from .distributed_sampler import DistributedSampler
from .group_sampler import DistributedGroupSampler, GroupSampler
__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoBytes,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.video import VideoTensorFlowTensor
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_tensorflow_tensor_field(file_url):
class MyVideoDoc(BaseDoc):
video_url: VideoUrl
tensor: Optional[VideoTensorFlowTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, VideoTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert 'video_url' in str(proto)
def test_load_bytes():
file_url = LOCAL_VIDEO_FILE
uri = parse_obj_as(VideoUrl, file_url)
video_bytes = uri.load_bytes()
assert isinstance(video_bytes, bytes)
assert isinstance(video_bytes, VideoBytes)
assert len(video_bytes) > 0
|
_base_ = './fovea_r50_fpn_4xb4-1x_coco.py'
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
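Both schedule files rely on MMEngine-style config inheritance: `_base_` pulls in the 1x baseline and the file overrides only `max_epochs` and the LR milestones. A minimal sketch of how such a file is consumed (the filename here is hypothetical):

from mmengine.config import Config

cfg = Config.fromfile('fovea_r50_fpn_4xb4-2x_coco.py')  # hypothetical path
print(cfg.train_cfg.max_epochs)  # 24, overriding the base 1x schedule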
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner'
]
|
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .uniform_assigner import UniformAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner'
]
|
import os
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def shutdown(self):
pass
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == Gateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = Gateway()
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseGateway'
assert type(Gateway.load_config(gateway_path)) == Gateway
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = MyDummyGateway()
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert type(Gateway.load_config(gateway_path)) == MyDummyGateway
|
import os
import yaml
from jina import Gateway
from jina.jaml import JAML
from jina.serve.executors import BaseExecutor
class MyDummyGateway(Gateway):
async def setup_server(self):
self.server = 'dummy server'
async def run_server(self):
self.logger.info(self.server)
async def teardown(self):
pass
async def stop_server(self):
self.server = None
def test_cls_from_tag():
assert JAML.cls_from_tag('MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('!MyDummyGateway') == MyDummyGateway
assert JAML.cls_from_tag('BaseGateway') == Gateway
assert JAML.cls_from_tag('Nonexisting') is None
def test_base_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
g = Gateway()
g.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseGateway'
assert type(Gateway.load_config(gateway_path)) == Gateway
def test_custom_jtype(tmpdir):
gateway_path = os.path.join(tmpdir, 'gateway.yml')
e = MyDummyGateway()
e.save_config(gateway_path)
with open(gateway_path, 'r') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'MyDummyGateway'
assert type(Gateway.load_config(gateway_path)) == MyDummyGateway
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
import gzip
import logging
import os
import sys
from datasets import Dataset
from sentence_transformers import SentenceTransformer, util
from sentence_transformers.evaluation import RerankingEvaluator
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "data/askubuntu"
training_corpus = os.path.join(askubuntu_folder, "train.unsupervised.txt")
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
id, title, *_ = line.strip().split("\t")
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath) -> Dataset:
data = {
"query": [],
"positive": [],
"negative": [],
}
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
data["query"].append(corpus[query_id])
data["positive"].append([corpus[pid] for pid in relevant_id])
data["negative"].append([corpus[pid] for pid in negative_ids])
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
dataset = Dataset.from_dict(data)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
# Create a dev evaluator
dev_evaluator = RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance")
dev_evaluator(model)
test_evaluator = RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance")
test_evaluator(model)
|
"""
This script runs the evaluation (dev & test) for the AskUbuntu dataset
Usage:
python eval_askubuntu.py [sbert_model_name_or_path]
"""
import gzip
import logging
import os
import sys
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
model = SentenceTransformer(sys.argv[1])
################# Download AskUbuntu and extract training corpus #################
askubuntu_folder = "askubuntu"
training_corpus = os.path.join(askubuntu_folder, "train.unsupervised.txt")
## Download the AskUbuntu dataset from https://github.com/taolei87/askubuntu
for filename in ["text_tokenized.txt.gz", "dev.txt", "test.txt", "train_random.txt"]:
filepath = os.path.join(askubuntu_folder, filename)
if not os.path.exists(filepath):
util.http_get("https://github.com/taolei87/askubuntu/raw/master/" + filename, filepath)
# Read the corpus
corpus = {}
dev_test_ids = set()
with gzip.open(os.path.join(askubuntu_folder, "text_tokenized.txt.gz"), "rt", encoding="utf8") as fIn:
for line in fIn:
splits = line.strip().split("\t")
id = splits[0]
title = splits[1]
corpus[id] = title
# Read dev & test dataset
def read_eval_dataset(filepath):
dataset = []
with open(filepath) as fIn:
for line in fIn:
query_id, relevant_id, candidate_ids, bm25_scores = line.strip().split("\t")
if len(relevant_id) == 0: # Skip examples without relevant entries
continue
relevant_id = relevant_id.split(" ")
candidate_ids = candidate_ids.split(" ")
negative_ids = set(candidate_ids) - set(relevant_id)
dataset.append(
{
"query": corpus[query_id],
"positive": [corpus[pid] for pid in relevant_id],
"negative": [corpus[pid] for pid in negative_ids],
}
)
dev_test_ids.add(query_id)
dev_test_ids.update(candidate_ids)
return dataset
dev_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "dev.txt"))
test_dataset = read_eval_dataset(os.path.join(askubuntu_folder, "test.txt"))
# Create a dev evaluator
dev_evaluator = evaluation.RerankingEvaluator(dev_dataset, name="AskUbuntu dev")
logging.info("Dev performance before training")
dev_evaluator(model)
test_evaluator = evaluation.RerankingEvaluator(test_dataset, name="AskUbuntu test")
logging.info("Test performance before training")
test_evaluator(model)
|
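Both versions feed `RerankingEvaluator` the same sample format: a query plus lists of positive and negative passages. A minimal self-contained sketch (the model name is only an example):

from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import RerankingEvaluator

samples = [{
    'query': 'how do I upgrade ubuntu',
    'positive': ['upgrading ubuntu to the latest release'],
    'negative': ['installing nvidia drivers on windows'],
}]
evaluator = RerankingEvaluator(samples, name='toy')
model = SentenceTransformer('all-MiniLM-L6-v2')
print(evaluator(model))  # MAP/MRR scores for the reranking task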
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.callbacks.backup_and_restore import (
BackupAndRestore as BackupAndRestore,
)
from keras.src.callbacks.callback import Callback as Callback
from keras.src.callbacks.callback_list import CallbackList as CallbackList
from keras.src.callbacks.csv_logger import CSVLogger as CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping as EarlyStopping
from keras.src.callbacks.history import History as History
from keras.src.callbacks.lambda_callback import LambdaCallback as LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import (
LearningRateScheduler as LearningRateScheduler,
)
from keras.src.callbacks.model_checkpoint import (
ModelCheckpoint as ModelCheckpoint,
)
from keras.src.callbacks.progbar_logger import ProgbarLogger as ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import (
ReduceLROnPlateau as ReduceLROnPlateau,
)
from keras.src.callbacks.remote_monitor import RemoteMonitor as RemoteMonitor
from keras.src.callbacks.swap_ema_weights import (
SwapEMAWeights as SwapEMAWeights,
)
from keras.src.callbacks.tensorboard import TensorBoard as TensorBoard
from keras.src.callbacks.terminate_on_nan import (
TerminateOnNaN as TerminateOnNaN,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.callbacks.backup_and_restore import BackupAndRestore
from keras.src.callbacks.callback import Callback
from keras.src.callbacks.callback_list import CallbackList
from keras.src.callbacks.csv_logger import CSVLogger
from keras.src.callbacks.early_stopping import EarlyStopping
from keras.src.callbacks.history import History
from keras.src.callbacks.lambda_callback import LambdaCallback
from keras.src.callbacks.learning_rate_scheduler import LearningRateScheduler
from keras.src.callbacks.model_checkpoint import ModelCheckpoint
from keras.src.callbacks.progbar_logger import ProgbarLogger
from keras.src.callbacks.reduce_lr_on_plateau import ReduceLROnPlateau
from keras.src.callbacks.remote_monitor import RemoteMonitor
from keras.src.callbacks.swap_ema_weights import SwapEMAWeights
from keras.src.callbacks.tensorboard import TensorBoard
from keras.src.callbacks.terminate_on_nan import TerminateOnNaN
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import DistSamplerSeedHook
class TestDistSamplerSeedHook:
def test_before_epoch(self):
hook = DistSamplerSeedHook()
# Test dataset sampler
runner = Mock()
runner.epoch = 1
runner.cur_dataloader = Mock()
runner.cur_dataloader.sampler = Mock()
runner.cur_dataloader.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.cur_dataloader.sampler.set_epoch.assert_called()
# Test batch sampler
runner = Mock()
runner.cur_dataloader = Mock()
runner.cur_dataloader.sampler = Mock(spec_set=True)
runner.cur_dataloader.batch_sampler = Mock()
runner.cur_dataloader.batch_sampler.sampler = Mock()
runner.cur_dataloader.batch_sampler.sampler.set_epoch = Mock()
hook.before_train_epoch(runner)
runner.cur_dataloader.batch_sampler.sampler.set_epoch.assert_called()
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import DistSamplerSeedHook
class TestDistSamplerSeedHook:
def test_before_epoch(self):
hook = DistSamplerSeedHook()
# Test dataset sampler
runner = Mock()
runner.epoch = 1
runner.data_loader = Mock()
runner.data_loader.sampler = Mock()
runner.data_loader.sampler.set_epoch = Mock()
hook.before_epoch(runner)
runner.data_loader.sampler.set_epoch.assert_called()
# Test batch sampler
runner = Mock()
runner.data_loader = Mock()
runner.data_loader.sampler = Mock(spec_set=True)
runner.data_loader.batch_sampler = Mock()
runner.data_loader.batch_sampler.sampler = Mock()
runner.data_loader.batch_sampler.sampler.set_epoch = Mock()
hook.before_epoch(runner)
runner.data_loader.batch_sampler.sampler.set_epoch.assert_called()
|
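What the hook automates is the standard `DistributedSampler.set_epoch` call; without it every epoch replays the same shuffle. A minimal sketch of the underlying behavior, with the replica count and rank passed explicitly so no process group is needed:

from torch.utils.data.distributed import DistributedSampler

sampler = DistributedSampler(list(range(10)), num_replicas=2, rank=0, shuffle=True)
sampler.set_epoch(0)
epoch0 = list(sampler)
sampler.set_epoch(1)
epoch1 = list(sampler)
print(epoch0 != epoch1)  # True: the shuffle is re-seeded each epoch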
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class ObjectDetectionInput(BaseModel):
query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiObjectDetectionTool(EdenaiTool):
"""Tool that queries the Eden AI Object detection API.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/image_object_detection_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_object_detection"
    description: str = (
        "A wrapper around Eden AI's Object Detection service. "
        """Useful when you need to identify and locate
        (with bounding boxes) objects in an image. """
"Input should be the string url of the image to identify."
)
args_schema: Type[BaseModel] = ObjectDetectionInput
show_positions: bool = False
feature: str = "image"
subfeature: str = "object_detection"
def _parse_json(self, json_data: dict) -> str:
result = []
label_info = []
for found_obj in json_data["items"]:
label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
x_min = found_obj.get("x_min")
x_max = found_obj.get("x_max")
y_min = found_obj.get("y_min")
y_max = found_obj.get("y_max")
if self.show_positions and all(
[
x_min,
x_max,
y_min,
y_max,
]
            ): # some providers don't return positions
                label_str += f""", at the position x_min: {x_min}, x_max: {x_max},
y_min: {y_min}, y_max: {y_max}"""
label_info.append(label_str)
result.append("\n".join(label_info))
return "\n\n".join(result)
    def _parse_response(self, response: list) -> str:
        result = ""  # default if no matching provider entry is found
        if len(response) == 1:
            result = self._parse_json(response[0])
        else:
for entry in response:
if entry.get("provider") == "eden-ai":
result = self._parse_json(entry)
return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
|
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class ObjectDetectionInput(BaseModel):
query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiObjectDetectionTool(EdenaiTool): # type: ignore[override, override, override]
"""Tool that queries the Eden AI Object detection API.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/image_object_detection_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_object_detection"
    description: str = (
        "A wrapper around Eden AI's Object Detection service. "
        """Useful when you need to identify and locate
        (with bounding boxes) objects in an image. """
"Input should be the string url of the image to identify."
)
args_schema: Type[BaseModel] = ObjectDetectionInput
show_positions: bool = False
feature: str = "image"
subfeature: str = "object_detection"
def _parse_json(self, json_data: dict) -> str:
result = []
label_info = []
for found_obj in json_data["items"]:
label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
x_min = found_obj.get("x_min")
x_max = found_obj.get("x_max")
y_min = found_obj.get("y_min")
y_max = found_obj.get("y_max")
if self.show_positions and all(
[
x_min,
x_max,
y_min,
y_max,
]
            ): # some providers don't return positions
                label_str += f""", at the position x_min: {x_min}, x_max: {x_max},
y_min: {y_min}, y_max: {y_max}"""
label_info.append(label_str)
result.append("\n".join(label_info))
return "\n\n".join(result)
    def _parse_response(self, response: list) -> str:
        result = ""  # default if no matching provider entry is found
        if len(response) == 1:
            result = self._parse_json(response[0])
        else:
for entry in response:
if entry.get("provider") == "eden-ai":
result = self._parse_json(entry)
return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.requests_chain import (
REQUEST_TEMPLATE,
APIRequesterChain,
APIRequesterOutputParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"APIRequesterChain": "langchain_community.chains.openapi.requests_chain",
"APIRequesterOutputParser": "langchain_community.chains.openapi.requests_chain",
"REQUEST_TEMPLATE": "langchain_community.chains.openapi.requests_chain",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["REQUEST_TEMPLATE", "APIRequesterChain", "APIRequesterOutputParser"]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chains.openapi.requests_chain import (
REQUEST_TEMPLATE,
APIRequesterChain,
APIRequesterOutputParser,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"APIRequesterChain": "langchain_community.chains.openapi.requests_chain",
"APIRequesterOutputParser": "langchain_community.chains.openapi.requests_chain",
"REQUEST_TEMPLATE": "langchain_community.chains.openapi.requests_chain",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = ["APIRequesterChain", "APIRequesterOutputParser", "REQUEST_TEMPLATE"]
|
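The shim above works through module-level `__getattr__` (PEP 562): any attribute not found on the module falls through to `__getattr__`, letting `create_importer` resolve the moved symbol lazily and warn. A stripped-down sketch of the mechanism, with hypothetical names:

import importlib
import warnings

_MOVED = {'APIRequesterChain': 'langchain_community.chains.openapi.requests_chain'}

def __getattr__(name: str):
    if name in _MOVED:
        warnings.warn(f'{name} moved to {_MOVED[name]}', DeprecationWarning)
        module = importlib.import_module(_MOVED[name])
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')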
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
def test_load_bytes():
uri = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
audio_bytes = uri.load_bytes()
assert isinstance(audio_bytes, bytes)
assert isinstance(audio_bytes, AudioBytes)
assert len(audio_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[
*[file for file in AUDIO_FILES],
REMOTE_AUDIO_FILE,
],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'my/local/text/file.txt',
'my/local/text/file.png',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='AudioUrl'):
parse_obj_as(AudioUrl, path_to_file)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
        It is called when a tensor is assigned to a field of this type,
        i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__validate_getitem__(item)
return cls._create_parametrized_type(target_shape)
@classmethod
@abc.abstractmethod
def __docarray_stack__(cls: Type[T], seq: Union[List[T], Tuple[T]]) -> T:
"""Stack a sequence of tensors into a single tensor."""
...
|
import abc
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, Tuple, Type, TypeVar
from docarray.typing.abstract_type import AbstractType
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AbstractTensor')
ShapeT = TypeVar('ShapeT')
class AbstractTensor(AbstractType, Generic[ShapeT], ABC):
__parametrized_meta__ = type
@classmethod
@abc.abstractmethod
def __validate_shape__(cls, t: T, shape: Tuple[int]) -> T:
"""Every tensor has to implement this method in order to
enable syntax of the form Tensor[shape].
        It is called when a tensor is assigned to a field of this type,
        i.e. when a tensor is passed to a Document field of type Tensor[shape].
The intended behaviour is as follows:
- If the shape of `t` is equal to `shape`, return `t`.
- If the shape of `t` is not equal to `shape`,
but can be reshaped to `shape`, return `t` reshaped to `shape`.
- If the shape of `t` is not equal to `shape`
and cannot be reshaped to `shape`, raise a ValueError.
:param t: The tensor to validate.
:param shape: The shape to validate against.
:return: The validated tensor.
"""
...
@classmethod
def __validate_getitem__(cls, item: Any) -> Tuple[int]:
"""This method validates the input to __class_getitem__.
It is called at "class creation time",
i.e. when a class is created with syntax of the form Tensor[shape].
The default implementation tries to cast any `item` to a tuple of ints.
A subclass can override this method to implement custom validation logic.
The output of this is eventually passed to
{ref}`AbstractTensor.__validate_shape__` as its `shape` argument.
        Raises `TypeError` if the input `item` does not pass validation.
:param item: The item to validate, passed to __class_getitem__ (`Tensor[item]`).
:return: The validated item == the target shape of this tensor.
"""
if isinstance(item, int):
item = (item,)
try:
item = tuple(item)
except TypeError:
raise TypeError(f'{item} is not a valid tensor shape.')
return item
@classmethod
def _create_parametrized_type(cls: Type[T], shape: Tuple[int]):
shape_str = ', '.join([str(s) for s in shape])
class _ParametrizedTensor(
cls, # type: ignore
metaclass=cls.__parametrized_meta__, # type: ignore
):
_docarray_target_shape = shape
__name__ = f'{cls.__name__}[{shape_str}]'
__qualname__ = f'{cls.__qualname__}[{shape_str}]'
@classmethod
def validate(
_cls,
value: Any,
field: 'ModelField',
config: 'BaseConfig',
):
t = super().validate(value, field, config)
return _cls.__validate_shape__(t, _cls._docarray_target_shape)
return _ParametrizedTensor
def __class_getitem__(cls, item: Any):
target_shape = cls.__validate_getitem__(item)
return cls._create_parametrized_type(target_shape)
|
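The `__class_getitem__` / `__validate_shape__` pair above is what powers docarray's `Tensor[shape]` syntax. A minimal usage sketch with the concrete `NdArray` subclass:

import numpy as np

from docarray import BaseDoc
from docarray.typing import NdArray

class ImageDoc(BaseDoc):
    # NdArray[3, 224, 224] is a parametrized type built via __class_getitem__;
    # assignment to this field goes through __validate_shape__.
    tensor: NdArray[3, 224, 224]

doc = ImageDoc(tensor=np.zeros((3, 224, 224)))
print(doc.tensor.shape)  # (3, 224, 224)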
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
from torch import Tensor
from torchaudio import AudioMetaData
def load(
filepath: Union[str, Path],
out: Optional[Tensor] = None,
normalization: Union[bool, float, Callable] = True,
channels_first: bool = True,
num_frames: int = 0,
offset: int = 0,
filetype: Optional[str] = None,
) -> Tuple[Tensor, int]:
raise RuntimeError("No audio I/O backend is available.")
def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:
raise RuntimeError("No audio I/O backend is available.")
def info(filepath: str) -> AudioMetaData:
raise RuntimeError("No audio I/O backend is available.")
|
from pathlib import Path
from typing import Callable, Optional, Tuple, Union
from torch import Tensor
def load(
filepath: Union[str, Path],
out: Optional[Tensor] = None,
normalization: Union[bool, float, Callable] = True,
channels_first: bool = True,
num_frames: int = 0,
offset: int = 0,
filetype: Optional[str] = None,
) -> Tuple[Tensor, int]:
raise RuntimeError("No audio I/O backend is available.")
def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:
raise RuntimeError("No audio I/O backend is available.")
def info(filepath: str) -> None:
raise RuntimeError("No audio I/O backend is available.")
|
"""Tool for the Metaphor search API."""
from typing import Dict, List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
@deprecated(
since="0.0.15",
removal="1.0",
alternative="langchain_exa.ExaSearchResults",
)
class MetaphorSearchResults(BaseTool):
"""Tool that queries the Metaphor Search API and gets back json."""
name: str = "metaphor_search_results_json"
description: str = (
"A wrapper around Metaphor Search. "
"Input should be a Metaphor-optimized query. "
"Output is a JSON array of the query results"
)
api_wrapper: MetaphorSearchAPIWrapper
def _run(
self,
query: str,
num_results: int,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.api_wrapper.results(
query,
num_results,
include_domains,
exclude_domains,
start_crawl_date,
end_crawl_date,
start_published_date,
end_published_date,
use_autoprompt,
)
except Exception as e:
return repr(e)
async def _arun(
self,
query: str,
num_results: int,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool asynchronously."""
try:
return await self.api_wrapper.results_async(
query,
num_results,
include_domains,
exclude_domains,
start_crawl_date,
end_crawl_date,
start_published_date,
end_published_date,
use_autoprompt,
)
except Exception as e:
return repr(e)
|
"""Tool for the Metaphor search API."""
from typing import Dict, List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
@deprecated(
since="0.0.15",
removal="1.0",
alternative="langchain_exa.ExaSearchResults",
)
class MetaphorSearchResults(BaseTool): # type: ignore[override]
"""Tool that queries the Metaphor Search API and gets back json."""
name: str = "metaphor_search_results_json"
description: str = (
"A wrapper around Metaphor Search. "
"Input should be a Metaphor-optimized query. "
"Output is a JSON array of the query results"
)
api_wrapper: MetaphorSearchAPIWrapper
def _run(
self,
query: str,
num_results: int,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool."""
try:
return self.api_wrapper.results(
query,
num_results,
include_domains,
exclude_domains,
start_crawl_date,
end_crawl_date,
start_published_date,
end_published_date,
use_autoprompt,
)
except Exception as e:
return repr(e)
async def _arun(
self,
query: str,
num_results: int,
include_domains: Optional[List[str]] = None,
exclude_domains: Optional[List[str]] = None,
start_crawl_date: Optional[str] = None,
end_crawl_date: Optional[str] = None,
start_published_date: Optional[str] = None,
end_published_date: Optional[str] = None,
use_autoprompt: Optional[bool] = None,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> Union[List[Dict], str]:
"""Use the tool asynchronously."""
try:
return await self.api_wrapper.results_async(
query,
num_results,
include_domains,
exclude_domains,
start_crawl_date,
end_crawl_date,
start_published_date,
end_published_date,
use_autoprompt,
)
except Exception as e:
return repr(e)
|
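The `@deprecated` decorator above tags the class so that using it warns and points at `langchain_exa.ExaSearchResults`. A generic sketch of the same decorator pattern (this is not langchain's implementation):

import functools
import warnings

def deprecated(since: str, removal: str, alternative: str):
    def wrap(cls):
        orig_init = cls.__init__

        @functools.wraps(orig_init)
        def __init__(self, *args, **kwargs):
            warnings.warn(
                f'{cls.__name__} is deprecated since {since} '
                f'(removal in {removal}); use {alternative} instead.',
                DeprecationWarning,
                stacklevel=2,
            )
            orig_init(self, *args, **kwargs)

        cls.__init__ = __init__
        return cls
    return wrap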
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.inception_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_v3 import (
preprocess_input as preprocess_input,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_v3 import InceptionV3
from keras.src.applications.inception_v3 import decode_predictions
from keras.src.applications.inception_v3 import preprocess_input
|
from typing import List, Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import SQLiteVec
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _sqlite_vec_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> SQLiteVec:
return SQLiteVec.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
table="test",
db_file=":memory:",
)
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec() -> None:
"""Test end to end construction and search."""
docsearch = _sqlite_vec_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={})]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
Document(page_content="baz", metadata={"page": 2}),
]
assert distances[0] < distances[1] < distances[2]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_search_multiple_tables() -> None:
"""Test end to end construction and search with multiple tables."""
docsearch_1 = SQLiteVec.from_texts(
fake_texts,
FakeEmbeddings(),
table="table_1",
db_file=":memory:", ## change to local storage for testing
)
docsearch_2 = SQLiteVec.from_texts(
fake_texts,
FakeEmbeddings(),
table="table_2",
db_file=":memory:",
)
output_1 = docsearch_1.similarity_search("foo", k=1)
output_2 = docsearch_2.similarity_search("foo", k=1)
assert output_1 == [Document(page_content="foo", metadata={})]
assert output_2 == [Document(page_content="foo", metadata={})]
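# --- Hedged usage sketch (not part of the test module; run from the repo root) ---
# The tests above use an in-memory database; pointing `db_file` at a path on
# disk persists the table between runs. FakeEmbeddings stands in for a real
# embedding model here.
if __name__ == "__main__":
    db = SQLiteVec.from_texts(
        ["hello world", "goodbye world"],
        FakeEmbeddings(),
        table="demo",
        db_file="/tmp/vec.db",  # persisted, unlike ":memory:"
    )
    print(db.similarity_search("hello", k=1))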
|
from typing import List, Optional
import pytest
from langchain_core.documents import Document
from langchain_community.vectorstores import SQLiteVec
from tests.integration_tests.vectorstores.fake_embeddings import (
FakeEmbeddings,
fake_texts,
)
def _sqlite_vec_from_texts(
metadatas: Optional[List[dict]] = None, drop: bool = True
) -> SQLiteVec:
return SQLiteVec.from_texts(
fake_texts,
FakeEmbeddings(),
metadatas=metadatas,
table="test",
db_file=":memory:",
)
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec() -> None:
"""Test end to end construction and search."""
docsearch = _sqlite_vec_from_texts()
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo", metadata={})]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_with_score() -> None:
"""Test end to end construction and search with scores and IDs."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
output = docsearch.similarity_search_with_score("foo", k=3)
docs = [o[0] for o in output]
distances = [o[1] for o in output]
assert docs == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
Document(page_content="baz", metadata={"page": 2}),
]
assert distances[0] < distances[1] < distances[2]
@pytest.mark.requires("sqlite-vec")
def test_sqlitevec_add_extra() -> None:
"""Test end to end construction and MRR search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = _sqlite_vec_from_texts(metadatas=metadatas)
docsearch.add_texts(texts, metadatas)
output = docsearch.similarity_search("foo", k=10)
assert len(output) == 6
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
    When not given, the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='The UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_deployment_parser(parser, title='Base Deployment'):
"""Mixing in arguments required by a deployment into the given parser.
The Deployment doesn't have scalable features like shards, replicas and polling
:param parser: the parser instance to which we add arguments
:param title: the title of the create args group
:return: returns the created arg group
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title=title)
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
gp.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
return gp
def mixin_scalable_deployment_parser(parser):
"""Mixing in arguments required by a scalable deployment into the given parser.
The deployment is scalable and can have shards, replicas and polling
:param parser: the parser instance to which we add arguments
"""
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
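# --- Hedged usage sketch (not part of the original module) ---
# Assuming jina is installed, the mixins above compose onto a plain argparse
# parser; the CLI values below are placeholders.
if __name__ == '__main__':
    demo_parser = argparse.ArgumentParser(description='deployment args demo')
    mixin_scalable_deployment_parser(demo_parser)
    args = demo_parser.parse_args(
        ['--name', 'my-deployment', '--shards', '2', '--replicas', '3']
    )
    print(args.name, args.shards, args.replicas, args.polling)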
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
    When not given, the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
        help='The UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_deployment_parser(parser, title='Base Deployment'):
"""Mixing in arguments required by a deployment into the given parser.
The Deployment doesn't have scalable features like shards, replicas and polling
:param parser: the parser instance to which we add arguments
:param title: the title of the create args group
:return: returns the created arg group
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title=title)
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
gp.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
return gp
def mixin_scalable_deployment_parser(parser):
"""Mixing in arguments required by a scalable deployment into the given parser.
The deployment is scalable and can have shards, replicas and polling
:param parser: the parser instance to which we add arguments
"""
gp = mixin_base_deployment_parser(parser, title='Scalable Deployment')
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--native',
action='store_true',
default=False,
        help='If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.',
)
|
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_california_housing(return_X_y=True)
    # Balance the threads: each of GridSearchCV's 2 parallel jobs trains an
    # XGBRegressor that itself uses half of the available cores.
xgb_model = xgb.XGBRegressor(
n_jobs=multiprocessing.cpu_count() // 2, tree_method="hist"
)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4, 6], "n_estimators": [50, 100, 200]},
verbose=1,
n_jobs=2,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
|
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_california_housing(return_X_y=True)
    # Balance the threads: each of GridSearchCV's 2 parallel jobs trains an
    # XGBRegressor that itself uses half of the available cores.
xgb_model = xgb.XGBRegressor(
n_jobs=multiprocessing.cpu_count() // 2, tree_method="hist"
)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4, 6], "n_estimators": [50, 100, 200]},
verbose=1,
n_jobs=2,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListChatModel
from langchain.retrievers.document_compressors import LLMChainExtractor
def test_llm_chain_extractor() -> None:
documents = [
Document(
page_content=(
"The sky is blue. Candlepin bowling is popular in New England."
),
metadata={"a": 1},
),
Document(
page_content=(
"Mercury is the closest planet to the Sun. "
"Candlepin bowling balls are smaller."
),
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(
responses=[
"Candlepin bowling is popular in New England.",
"Candlepin bowling balls are smaller.",
"NO_OUTPUT",
],
)
doc_compressor = LLMChainExtractor.from_llm(llm)
output = doc_compressor.compress_documents(
documents,
"Tell me about Candlepin bowling.",
)
    expected = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
]
assert output == expected
async def test_llm_chain_extractor_async() -> None:
documents = [
Document(
page_content=(
"The sky is blue. Candlepin bowling is popular in New England."
),
metadata={"a": 1},
),
Document(
page_content=(
"Mercury is the closest planet to the Sun. "
"Candlepin bowling balls are smaller."
),
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(
responses=[
"Candlepin bowling is popular in New England.",
"Candlepin bowling balls are smaller.",
"NO_OUTPUT",
],
)
doc_compressor = LLMChainExtractor.from_llm(llm)
output = await doc_compressor.acompress_documents(
documents,
"Tell me about Candlepin bowling.",
)
expected = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.",
metadata={"b": 2},
),
]
assert output == expected
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListChatModel
from langchain.retrievers.document_compressors import LLMChainExtractor
def test_llm_chain_extractor() -> None:
documents = [
Document(
page_content=(
"The sky is blue. Candlepin bowling is popular in New England."
),
metadata={"a": 1},
),
Document(
page_content=(
"Mercury is the closest planet to the Sun. "
"Candlepin bowling balls are smaller."
),
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(
responses=[
"Candlepin bowling is popular in New England.",
"Candlepin bowling balls are smaller.",
"NO_OUTPUT",
]
)
doc_compressor = LLMChainExtractor.from_llm(llm)
output = doc_compressor.compress_documents(
documents, "Tell me about Candlepin bowling."
)
    expected = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.", metadata={"b": 2}
),
]
assert output == expected
async def test_llm_chain_extractor_async() -> None:
documents = [
Document(
page_content=(
"The sky is blue. Candlepin bowling is popular in New England."
),
metadata={"a": 1},
),
Document(
page_content=(
"Mercury is the closest planet to the Sun. "
"Candlepin bowling balls are smaller."
),
metadata={"b": 2},
),
Document(page_content="The moon is round.", metadata={"c": 3}),
]
llm = FakeListChatModel(
responses=[
"Candlepin bowling is popular in New England.",
"Candlepin bowling balls are smaller.",
"NO_OUTPUT",
]
)
doc_compressor = LLMChainExtractor.from_llm(llm)
output = await doc_compressor.acompress_documents(
documents, "Tell me about Candlepin bowling."
)
expected = [
Document(
page_content="Candlepin bowling is popular in New England.",
metadata={"a": 1},
),
Document(
page_content="Candlepin bowling balls are smaller.", metadata={"b": 2}
),
]
assert output == expected
|
from enum import Enum
from typing import Callable, List, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhattan_sim,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
    - ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values() -> List[str]:
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
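# --- Hedged usage sketch (not part of the original module; assumes it runs in
# the package context so the relative `.util` import above resolves) ---
if __name__ == "__main__":
    import torch

    embeddings1 = torch.rand(2, 4)
    embeddings2 = torch.rand(2, 4)
    sim_fn = SimilarityFunction.to_similarity_fn(SimilarityFunction.COSINE)
    print(sim_fn(embeddings1, embeddings2))  # full 2x2 similarity matrix
    pair_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
    print(pair_fn(embeddings1, embeddings2))  # diagonal only, shape (2,)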
|
from enum import Enum
from typing import Callable, Union
from numpy import ndarray
from torch import Tensor
from .util import (
cos_sim,
dot_score,
euclidean_sim,
manhattan_sim,
pairwise_cos_sim,
pairwise_dot_score,
pairwise_euclidean_sim,
pairwise_manhattan_sim,
)
class SimilarityFunction(Enum):
"""
Enum class for supported similarity functions. The following functions are supported:
- ``SimilarityFunction.COSINE`` (``"cosine"``): Cosine similarity
    - ``SimilarityFunction.DOT_PRODUCT`` (``"dot"``): Dot product similarity
- ``SimilarityFunction.EUCLIDEAN`` (``"euclidean"``): Euclidean distance
- ``SimilarityFunction.MANHATTAN`` (``"manhattan"``): Manhattan distance
"""
COSINE = "cosine"
DOT_PRODUCT = "dot"
DOT = "dot" # Alias for DOT_PRODUCT
EUCLIDEAN = "euclidean"
MANHATTAN = "manhattan"
@staticmethod
def to_similarity_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function name or enum value to the corresponding similarity function.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The corresponding similarity function.
Raises:
ValueError: If the provided function is not supported.
Example:
>>> similarity_fn = SimilarityFunction.to_similarity_fn("cosine")
>>> similarity_scores = similarity_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([[0.3952, 0.0554],
[0.0992, 0.1570]])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def to_similarity_pairwise_fn(
similarity_function: Union[str, "SimilarityFunction"],
) -> Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]:
"""
Converts a similarity function into a pairwise similarity function.
The pairwise similarity function returns the diagonal vector from the similarity matrix, i.e. it only
computes the similarity(a[i], b[i]) for each i in the range of the input tensors, rather than
computing the similarity between all pairs of a and b.
Args:
similarity_function (Union[str, SimilarityFunction]): The name or enum value of the similarity function.
Returns:
Callable[[Union[Tensor, ndarray], Union[Tensor, ndarray]], Tensor]: The pairwise similarity function.
Raises:
ValueError: If the provided similarity function is not supported.
Example:
>>> pairwise_fn = SimilarityFunction.to_similarity_pairwise_fn("cosine")
>>> similarity_scores = pairwise_fn(embeddings1, embeddings2)
>>> similarity_scores
tensor([0.3952, 0.1570])
"""
similarity_function = SimilarityFunction(similarity_function)
if similarity_function == SimilarityFunction.COSINE:
return pairwise_cos_sim
if similarity_function == SimilarityFunction.DOT_PRODUCT:
return pairwise_dot_score
if similarity_function == SimilarityFunction.MANHATTAN:
return pairwise_manhattan_sim
if similarity_function == SimilarityFunction.EUCLIDEAN:
return pairwise_euclidean_sim
raise ValueError(
"The provided function {} is not supported. Use one of the supported values: {}.".format(
similarity_function, SimilarityFunction.possible_values()
)
)
@staticmethod
def possible_values():
"""
Returns a list of possible values for the SimilarityFunction enum.
Returns:
list: A list of possible values for the SimilarityFunction enum.
Example:
>>> possible_values = SimilarityFunction.possible_values()
>>> possible_values
['cosine', 'dot', 'euclidean', 'manhattan']
"""
return [m.value for m in SimilarityFunction]
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticDocIndex # noqa: F401
from docarray.index.backends.elasticv7 import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticDocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elasticv7 as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
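# --- Hedged usage sketch (not part of the original module) ---
# The module-level __getattr__ above implements PEP 562 lazy imports: a backend
# (and its optional dependency) is imported only when its attribute is first
# accessed. Assuming docarray and hnswlib are installed:
if __name__ == '__main__':
    import docarray.index

    # Attribute access, not the import itself, triggers __getattr__.
    index_cls = docarray.index.HnswDocumentIndex
    print(index_cls, docarray.index.__all__)  # __all__ now records the name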
|
import types
from typing import TYPE_CHECKING
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.index.backends.elastic import ElasticV7DocIndex # noqa: F401
from docarray.index.backends.hnswlib import HnswDocumentIndex # noqa: F401
__all__ = []
def __getattr__(name: str):
lib: types.ModuleType
if name == 'HnswDocumentIndex':
import_library('hnswlib', raise_error=True)
import docarray.index.backends.hnswlib as lib
elif name == 'ElasticV7DocIndex':
import_library('elasticsearch', raise_error=True)
import docarray.index.backends.elastic as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
index_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return index_cls
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def __init__(self):
self.time_sec_tot = 0
self.start_iter = 0
def before_run(self, runner) -> None:
"""Synchronize the number of iterations with the runner.
Args:
runner: The runner of the training, validation or testing
process.
"""
self.start_iter = runner.iter
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record timestamp before start an epoch.
Args:
runner (Runner): The runner of the training validation and
testing process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Calculating time for loading data and updating "data_time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training, validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update data loading time in `runner.message_hub`.
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Calculating time for an iteration and updating "time"
``HistoryBuffer`` of ``runner.message_hub``.
Args:
runner (Runner): The runner of the training validation and
testing process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# Update iteration time in `runner.message_hub`.
message_hub = runner.message_hub
message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
window_size = runner.log_processor.window_size
        # Calculate eta every `window_size` iterations. Since the test and val
        # loops will not update runner.iter, use `every_n_inner_iters` to check
        # the interval.
if self.every_n_inner_iters(batch_idx, window_size):
iter_time = message_hub.get_scalar(f'{mode}/time').mean(
window_size)
if mode == 'train':
self.time_sec_tot += iter_time * window_size
# Calculate average iterative time.
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
# Calculate eta.
eta_sec = time_sec_avg * (
runner.train_loop.max_iters - runner.iter - 1)
runner.message_hub.update_info('eta', eta_sec)
else:
if mode == 'val':
cur_dataloader = runner.val_loop.dataloader
else:
cur_dataloader = runner.test_loop.dataloader
eta_sec = iter_time * (len(cur_dataloader) - batch_idx - 1)
runner.message_hub.update_info('eta', eta_sec)
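# --- Hedged worked example (not part of the original module) ---
# The training-mode ETA arithmetic from _after_iter, replayed with plain
# numbers: after 100 of 1000 iterations at an average of 0.5 s each,
# eta = time_sec_avg * (max_iters - iter - 1).
if __name__ == '__main__':
    time_sec_tot = 50.0  # accumulated train-step time so far
    start_iter, cur_iter, max_iters = 0, 99, 1000
    time_sec_avg = time_sec_tot / (cur_iter - start_iter + 1)  # 0.5 s/iter
    eta_sec = time_sec_avg * (max_iters - cur_iter - 1)  # 450.0 s remaining
    print(f'eta: {eta_sec:.1f} s')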
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
_base_ = 'cascade-mask-rcnn_regnetx-3.2GF_fpn_ms-3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
|
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_800mf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
neck=dict(
type='FPN',
in_channels=[64, 128, 288, 672],
out_channels=256,
num_outs=5))
|
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1e-5, 1e-2),
'nthread': strategies.integers(1, 4),
})
coord_strategy = strategies.fixed_dictionaries({
'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
'greedy', 'thrifty']),
'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
result = {}
xgb.train(
param,
dmat,
num_rounds,
evals=[(dmat, "train")],
verbose_eval=False,
evals_result=result,
)
return result
class TestLinear:
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate(self, param, num_rounds, dataset, coord_param):
param['updater'] = 'coord_descent'
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy,
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
param['updater'] = 'coord_descent'
param['alpha'] = alpha
param['lambda'] = lambd
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@given(
parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun(self, param, num_rounds, dataset):
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
sampled_result = (result[0], result[-1])
else:
sampled_result = result
assert tm.non_increasing(sampled_result)
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 1.0),
strategies.floats(1e-5, 1.0)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
param['updater'] = 'shotgun'
param['alpha'] = alpha
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
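# --- Hedged usage sketch (not part of the test module) ---
# The same training path the property tests exercise, on a tiny synthetic
# dataset and without the hypothesis strategies.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X, y = rng.randn(64, 4), rng.randn(64)
    dtrain = xgb.DMatrix(X, label=y)
    params = {'booster': 'gblinear', 'updater': 'coord_descent', 'eta': 0.1}
    history = train_result(params, dtrain, 10)
    print(history['train'])  # per-iteration training metric (rmse by default)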
|
from hypothesis import given, note, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
pytestmark = tm.timeout(20)
parameter_strategy = strategies.fixed_dictionaries({
'booster': strategies.just('gblinear'),
'eta': strategies.floats(0.01, 0.25),
'tolerance': strategies.floats(1e-5, 1e-2),
'nthread': strategies.integers(1, 4),
})
coord_strategy = strategies.fixed_dictionaries({
'feature_selector': strategies.sampled_from(['cyclic', 'shuffle',
'greedy', 'thrifty']),
'top_k': strategies.integers(1, 10),
})
def train_result(param, dmat, num_rounds):
result = {}
xgb.train(param, dmat, num_rounds, [(dmat, 'train')], verbose_eval=False,
evals_result=result)
return result
class TestLinear:
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate(self, param, num_rounds, dataset, coord_param):
param['updater'] = 'coord_descent'
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing(result, 5e-4)
# Loss is not guaranteed to always decrease because of regularisation parameters
# We test a weaker condition that the loss has not increased between the first and last
# iteration
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
coord_strategy,
strategies.floats(1e-5, 0.8),
strategies.floats(1e-5, 0.8)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_coordinate_regularised(self, param, num_rounds, dataset, coord_param, alpha, lambd):
param['updater'] = 'coord_descent'
param['alpha'] = alpha
param['lambda'] = lambd
param.update(coord_param)
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
@given(
parameter_strategy, strategies.integers(10, 50), tm.make_dataset_strategy()
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun(self, param, num_rounds, dataset):
param['updater'] = 'shotgun'
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
# shotgun is non-deterministic, so we relax the test by only using first and last
# iteration.
if len(result) > 2:
sampled_result = (result[0], result[-1])
else:
sampled_result = result
assert tm.non_increasing(sampled_result)
@given(
parameter_strategy,
strategies.integers(10, 50),
tm.make_dataset_strategy(),
strategies.floats(1e-5, 1.0),
strategies.floats(1e-5, 1.0)
)
@settings(deadline=None, max_examples=20, print_blob=True)
def test_shotgun_regularised(self, param, num_rounds, dataset, alpha, lambd):
param['updater'] = 'shotgun'
param['alpha'] = alpha
param['lambda'] = lambd
param = dataset.set_params(param)
result = train_result(param, dataset.get_dmat(), num_rounds)['train'][dataset.metric]
note(result)
assert tm.non_increasing([result[0], result[-1]])
|
"""Simple Reader that loads text relevant to a certain search keyword from subreddits."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RedditReader(BaseReader):
"""
Subreddit post and top-level comments reader for Reddit.
"""
def load_data(
self,
subreddits: List[str],
search_keys: List[str],
        post_limit: Optional[int] = 10,
) -> List[Document]:
"""
Load text from relevant posts and top-level comments in subreddit(s), given keyword(s) for search.
Args:
subreddits (List[str]): List of subreddits you'd like to read from
search_keys (List[str]): List of keywords you'd like to use to search from subreddit(s)
post_limit (Optional[int]): Maximum number of posts per subreddit you'd like to read from, defaults to 10
"""
import os
import praw
from praw.models import MoreComments
reddit = praw.Reddit(
client_id=os.getenv("REDDIT_CLIENT_ID"),
client_secret=os.getenv("REDDIT_CLIENT_SECRET"),
user_agent=os.getenv("REDDIT_USER_AGENT"),
username=os.getenv("REDDIT_USERNAME"),
password=os.getenv("REDDIT_PASSWORD"),
)
posts = []
for sr in subreddits:
ml_subreddit = reddit.subreddit(sr)
for kw in search_keys:
relevant_posts = ml_subreddit.search(kw, limit=post_limit)
for post in relevant_posts:
posts.append(Document(text=post.selftext))
for top_level_comment in post.comments:
if isinstance(top_level_comment, MoreComments):
continue
posts.append(Document(text=top_level_comment.body))
return posts
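# --- Hedged usage sketch (not part of the original module) ---
# Assumes praw is installed and the five REDDIT_* environment variables read in
# load_data above are set; the subreddit and keyword are placeholders.
if __name__ == "__main__":
    reader = RedditReader()
    documents = reader.load_data(
        subreddits=["MachineLearning"],
        search_keys=["retrieval augmented generation"],
        post_limit=5,
    )
    print(f"loaded {len(documents)} documents")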
|
"""Simple Reader that loads text relevant to a certain search keyword from subreddits."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RedditReader(BaseReader):
"""
Subreddit post and top-level comments reader for Reddit.
"""
def load_data(
self,
subreddits: List[str],
search_keys: List[str],
        post_limit: Optional[int] = 10,
) -> List[Document]:
"""
Load text from relevant posts and top-level comments in subreddit(s), given keyword(s) for search.
Args:
subreddits (List[str]): List of subreddits you'd like to read from
search_keys (List[str]): List of keywords you'd like to use to search from subreddit(s)
post_limit (Optional[int]): Maximum number of posts per subreddit you'd like to read from, defaults to 10
"""
import os
import praw
from praw.models import MoreComments
reddit = praw.Reddit(
client_id=os.getenv("REDDIT_CLIENT_ID"),
client_secret=os.getenv("REDDIT_CLIENT_SECRET"),
user_agent=os.getenv("REDDIT_USER_AGENT"),
username=os.getenv("REDDIT_USERNAME"),
password=os.getenv("REDDIT_PASSWORD"),
)
posts = []
for sr in subreddits:
ml_subreddit = reddit.subreddit(sr)
for kw in search_keys:
relevant_posts = ml_subreddit.search(kw, limit=post_limit)
for post in relevant_posts:
posts.append(Document(text=post.selftext))
for top_level_comment in post.comments:
if isinstance(top_level_comment, MoreComments):
continue
posts.append(Document(text=top_level_comment.body))
return posts
|
import csv
import pathlib
from typing import Any, Callable, Optional, Union
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "gtsrb"
self._target_folder = (
self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
)
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
if self._split == "train":
samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
else:
with open(self._base_folder / "GT-final_test.csv") as csv_file:
samples = [
(str(self._target_folder / row["Filename"]), int(row["ClassId"]))
for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
]
self._samples = samples
self.transform = transform
self.target_transform = target_transform
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, index: int) -> tuple[Any, Any]:
path, target = self._samples[index]
sample = PIL.Image.open(path).convert("RGB")
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def _check_exists(self) -> bool:
return self._target_folder.is_dir()
def download(self) -> None:
if self._check_exists():
return
base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
if self._split == "train":
download_and_extract_archive(
f"{base_url}GTSRB-Training_fixed.zip",
download_root=str(self._base_folder),
md5="513f3c79a4c5141765e10e952eaa2478",
)
else:
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_Images.zip",
download_root=str(self._base_folder),
md5="c7e4e6327067d32654124b0fe9e82185",
)
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_GT.zip",
download_root=str(self._base_folder),
md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
)
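# --- Hedged usage sketch (not part of the original module; assumes it runs in
# the torchvision package context so the relative imports above resolve) ---
if __name__ == "__main__":
    dataset = GTSRB(root="./data", split="train", download=True)  # placeholder root
    image, target = dataset[0]
    print(len(dataset), image.size, target)  # PIL image size and integer class id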
|
import csv
import pathlib
from typing import Any, Callable, Optional, Tuple, Union
import PIL
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class GTSRB(VisionDataset):
"""`German Traffic Sign Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset.
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
def __init__(
self,
root: Union[str, pathlib.Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "test"))
self._base_folder = pathlib.Path(root) / "gtsrb"
self._target_folder = (
self._base_folder / "GTSRB" / ("Training" if self._split == "train" else "Final_Test/Images")
)
if download:
self.download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
if self._split == "train":
samples = make_dataset(str(self._target_folder), extensions=(".ppm",))
else:
with open(self._base_folder / "GT-final_test.csv") as csv_file:
samples = [
(str(self._target_folder / row["Filename"]), int(row["ClassId"]))
for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True)
]
self._samples = samples
self.transform = transform
self.target_transform = target_transform
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
path, target = self._samples[index]
sample = PIL.Image.open(path).convert("RGB")
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def _check_exists(self) -> bool:
return self._target_folder.is_dir()
def download(self) -> None:
if self._check_exists():
return
base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/"
if self._split == "train":
download_and_extract_archive(
f"{base_url}GTSRB-Training_fixed.zip",
download_root=str(self._base_folder),
md5="513f3c79a4c5141765e10e952eaa2478",
)
else:
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_Images.zip",
download_root=str(self._base_folder),
md5="c7e4e6327067d32654124b0fe9e82185",
)
download_and_extract_archive(
f"{base_url}GTSRB_Final_Test_GT.zip",
download_root=str(self._base_folder),
md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5",
)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
video, audio, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
@pytest.mark.parametrize(
'field, attr_cls',
[
('video', VideoNdArray),
('audio', AudioNdArray),
('key_frame_indices', NdArray),
],
)
def test_load_one_of_named_tuple_results(file_url, field, attr_cls):
url = parse_obj_as(VideoUrl, file_url)
result = getattr(url.load(), field)
assert isinstance(result, np.ndarray)
assert isinstance(result, attr_cls)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load().video
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('video_url')
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDocument
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import (
AudioNdArray,
NdArray,
VideoNdArray,
VideoTorchTensor,
VideoUrl,
)
from tests import TOYDATA_DIR
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load(file_url):
url = parse_obj_as(VideoUrl, file_url)
audio, video, indices = url.load()
assert isinstance(audio, np.ndarray)
assert isinstance(audio, AudioNdArray)
assert isinstance(video, np.ndarray)
assert isinstance(video, VideoNdArray)
assert isinstance(indices, np.ndarray)
assert isinstance(indices, NdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_key_frames(file_url):
url = parse_obj_as(VideoUrl, file_url)
key_frames = url.load_key_frames()
assert isinstance(key_frames, np.ndarray)
assert isinstance(key_frames, VideoNdArray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_load_video_url_to_video_torch_tensor_field(file_url):
class MyVideoDoc(BaseDocument):
video_url: VideoUrl
tensor: Optional[VideoTorchTensor]
doc = MyVideoDoc(video_url=file_url)
doc.tensor = doc.video_url.load_key_frames()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, VideoTorchTensor)
def test_json_schema():
schema_json_of(VideoUrl)
def test_dump_json():
url = parse_obj_as(VideoUrl, REMOTE_VIDEO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(VideoUrl, path_to_file)
assert isinstance(url, VideoUrl)
assert isinstance(url, str)
@pytest.mark.parametrize(
'path_to_file',
[
'illegal',
'https://www.google.com',
'my/local/text/file.txt',
'my/local/text/file.png',
'my/local/file.mp3',
],
)
def test_illegal_validation(path_to_file):
with pytest.raises(ValueError, match='VideoUrl'):
parse_obj_as(VideoUrl, path_to_file)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE],
)
def test_proto_video_url(file_url):
uri = parse_obj_as(VideoUrl, file_url)
proto = uri._to_node_protobuf()
assert str(proto).startswith('video_url')
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
Rendered SST2 is an image classification dataset used to evaluate the models capability on optical
character recognition. This dataset was generated by rendering sentences in the Standford Sentiment
Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (str or ``pathlib.Path``): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), `"val"` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: Union[str, Path],
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
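# A minimal usage sketch (illustrative, not part of torchvision): download the
# validation split into ./data and inspect one sample; requires network access.
if __name__ == "__main__":
    dataset = RenderedSST2(root="./data", split="val", download=True)
    image, label = dataset[0]
    print(len(dataset), image.size, dataset.classes[label])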
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate the model's capability in optical
    character recognition. This dataset was generated by rendering sentences from the Stanford Sentiment
    Treebank v2 dataset.
    This dataset contains two classes (positive and negative) and is divided into three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseTripletEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
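# Optionally persist the metrics to disk (a hedged note: output_path is the
# standard SentenceEvaluator argument for writing a results CSV, and the
# directory is assumed to exist):
# import os
# os.makedirs("eval_results", exist_ok=True)
# evaluator(model, output_path="eval_results")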
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseTripletEvaluator,
)
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
"""
TripletEvaluator: Evaluating the model on the all_nli_dev dataset:
Accuracy Dot Similarity: 85.10%
"""
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
# => Primary metric: all_nli_dev_dot_accuracy
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8510
|
from __future__ import annotations
try:
from typing import Self
except ImportError:
from typing_extensions import Self
import torch
from torch import nn
from sentence_transformers.models.Module import Module
class LSTM(Module):
"""Bidirectional LSTM running over word embeddings."""
config_keys: list[str] = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
config_file_name: str = "lstm_config.json"
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
super().__init__()
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
# Saving LSTM models with Safetensors does not work unless the weights are on CPU
# See https://github.com/UKPLab/sentence-transformers/pull/2722
device = next(self.parameters()).device
self.cpu()
self.save_torch_weights(output_path, safe_serialization=safe_serialization)
self.to(device)
@classmethod
def load(
cls,
model_name_or_path: str,
subfolder: str = "",
token: bool | str | None = None,
cache_folder: str | None = None,
revision: str | None = None,
local_files_only: bool = False,
**kwargs,
) -> Self:
hub_kwargs = {
"subfolder": subfolder,
"token": token,
"cache_folder": cache_folder,
"revision": revision,
"local_files_only": local_files_only,
}
config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
model = cls(**config)
model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
return model
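# A minimal forward-pass sketch with made-up shapes (not part of the module):
# two padded sequences of 300-dim word embeddings run through the BiLSTM.
if __name__ == "__main__":
    lstm = LSTM(word_embedding_dimension=300, hidden_dim=128)
    features = {
        "token_embeddings": torch.randn(2, 10, 300),  # (batch, seq_len, emb_dim)
        "sentence_lengths": torch.tensor([10, 7]),
    }
    out = lstm(features)
    print(out["token_embeddings"].shape)  # torch.Size([2, 10, 256]): bidirectional doubles hidden_dim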
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> list[int]:
raise NotImplementedError()
def save(self, output_path: str, safe_serialization: bool = True):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
device = next(self.parameters()).device
if safe_serialization:
save_safetensors_model(self.cpu(), os.path.join(output_path, "model.safetensors"))
self.to(device)
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json")) as fIn:
config = json.load(fIn)
model = LSTM(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
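# A minimal save/load round-trip sketch (not part of the module): write the JSON
# config plus safetensors weights to a temporary directory, then restore.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        model = LSTM(word_embedding_dimension=300, hidden_dim=128)
        model.save(tmp_dir, safe_serialization=True)
        restored = LSTM.load(tmp_dir)
        assert restored.get_word_embedding_dimension() == 256  # 2 * hidden_dim (bidirectional)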
|
"""Callback Handler streams to stdout on new llm token."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from typing_extensions import override
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming. Only works with LLMs that support streaming."""
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
"""Run when LLM starts running.
Args:
serialized (dict[str, Any]): The serialized LLM.
prompts (list[str]): The prompts to run.
**kwargs (Any): Additional keyword arguments.
"""
def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
**kwargs: Any,
) -> None:
"""Run when LLM starts running.
Args:
serialized (dict[str, Any]): The serialized LLM.
messages (list[list[BaseMessage]]): The messages to run.
**kwargs (Any): Additional keyword arguments.
"""
@override
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token (str): The new token.
**kwargs (Any): Additional keyword arguments.
"""
sys.stdout.write(token)
sys.stdout.flush()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The response from the LLM.
**kwargs (Any): Additional keyword arguments.
"""
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Run when a chain starts running.
Args:
serialized (dict[str, Any]): The serialized chain.
inputs (dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Run when a chain ends running.
Args:
outputs (dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_start(
self, serialized: dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when the tool starts running.
Args:
serialized (dict[str, Any]): The serialized tool.
input_str (str): The input string.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (Any): The output of the tool.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on an arbitrary text.
Args:
text (str): The text to print.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
**kwargs (Any): Additional keyword arguments.
"""
|
"""Callback Handler streams to stdout on new llm token."""
from __future__ import annotations
import sys
from typing import TYPE_CHECKING, Any
from typing_extensions import override
from langchain_core.callbacks.base import BaseCallbackHandler
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import LLMResult
class StreamingStdOutCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming. Only works with LLMs that support streaming."""
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
prompts (List[str]): The prompts to run.
**kwargs (Any): Additional keyword arguments.
"""
def on_chat_model_start(
self,
serialized: dict[str, Any],
messages: list[list[BaseMessage]],
**kwargs: Any,
) -> None:
"""Run when LLM starts running.
Args:
serialized (Dict[str, Any]): The serialized LLM.
messages (List[List[BaseMessage]]): The messages to run.
**kwargs (Any): Additional keyword arguments.
"""
@override
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled.
Args:
token (str): The new token.
**kwargs (Any): Additional keyword arguments.
"""
sys.stdout.write(token)
sys.stdout.flush()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running.
Args:
response (LLMResult): The response from the LLM.
**kwargs (Any): Additional keyword arguments.
"""
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_start(
self, serialized: dict[str, Any], inputs: dict[str, Any], **kwargs: Any
) -> None:
"""Run when a chain starts running.
Args:
serialized (Dict[str, Any]): The serialized chain.
inputs (Dict[str, Any]): The inputs to the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_end(self, outputs: dict[str, Any], **kwargs: Any) -> None:
"""Run when a chain ends running.
Args:
outputs (Dict[str, Any]): The outputs of the chain.
**kwargs (Any): Additional keyword arguments.
"""
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_start(
self, serialized: dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when the tool starts running.
Args:
serialized (Dict[str, Any]): The serialized tool.
input_str (str): The input string.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action.
Args:
action (AgentAction): The agent action.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_end(self, output: Any, **kwargs: Any) -> None:
"""Run when tool ends running.
Args:
output (Any): The output of the tool.
**kwargs (Any): Additional keyword arguments.
"""
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors.
Args:
error (BaseException): The error that occurred.
**kwargs (Any): Additional keyword arguments.
"""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on an arbitrary text.
Args:
text (str): The text to print.
**kwargs (Any): Additional keyword arguments.
"""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on the agent end.
Args:
finish (AgentFinish): The agent finish.
**kwargs (Any): Additional keyword arguments.
"""
|
"""Zendesk reader."""
import json
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ZendeskReader(BaseReader):
"""
Zendesk reader. Reads data from a Zendesk workspace.
Args:
zendesk_subdomain (str): Zendesk subdomain
locale (str): Locale of articles
"""
def __init__(self, zendesk_subdomain: str, locale: str = "en-us") -> None:
"""Initialize Zendesk reader."""
self.zendesk_subdomain = zendesk_subdomain
self.locale = locale
def load_data(self) -> List[Document]:
"""
Load data from the workspace.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup
results = []
articles = self.get_all_articles()
for article in articles:
body = article["body"]
if body is None:
continue
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
extra_info = {
"id": article["id"],
"title": article["title"],
"url": article["html_url"],
"updated_at": article["updated_at"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_articles(self):
articles = []
next_page = None
while True:
response = self.get_articles_page(next_page)
articles.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return articles
    def get_articles_page(self, next_page: Optional[str] = None):
import requests
if next_page is None:
url = f"https://{self.zendesk_subdomain}.zendesk.com/api/v2/help_center/{self.locale}/articles?per_page=100"
else:
url = next_page
response = requests.get(url)
response_json = json.loads(response.text)
next_page = response_json.get("next_page", None)
articles = response_json.get("articles", [])
return {"articles": articles, "next_page": next_page}
|
"""Zendesk reader."""
import json
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class ZendeskReader(BaseReader):
"""Zendesk reader. Reads data from a Zendesk workspace.
Args:
zendesk_subdomain (str): Zendesk subdomain
locale (str): Locale of articles
"""
def __init__(self, zendesk_subdomain: str, locale: str = "en-us") -> None:
"""Initialize Zendesk reader."""
self.zendesk_subdomain = zendesk_subdomain
self.locale = locale
def load_data(self) -> List[Document]:
"""Load data from the workspace.
Returns:
List[Document]: List of documents.
"""
from bs4 import BeautifulSoup
results = []
articles = self.get_all_articles()
for article in articles:
body = article["body"]
if body is None:
continue
soup = BeautifulSoup(body, "html.parser")
body = soup.get_text()
extra_info = {
"id": article["id"],
"title": article["title"],
"url": article["html_url"],
"updated_at": article["updated_at"],
}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
def get_all_articles(self):
articles = []
next_page = None
while True:
response = self.get_articles_page(next_page)
articles.extend(response["articles"])
next_page = response["next_page"]
if next_page is None:
break
return articles
    def get_articles_page(self, next_page: Optional[str] = None):
import requests
if next_page is None:
url = f"https://{self.zendesk_subdomain}.zendesk.com/api/v2/help_center/{self.locale}/articles?per_page=100"
else:
url = next_page
response = requests.get(url)
response_json = json.loads(response.text)
next_page = response_json.get("next_page", None)
articles = response_json.get("articles", [])
return {"articles": articles, "next_page": next_page}
|