input | output
---|---
import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import List, Union
from urllib.parse import urlparse
import requests
from langchain_community.docstore.document import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import VsdxParser
class VsdxLoader(BaseLoader, ABC):
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
self.parser = VsdxParser()
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def load(self) -> List[Document]:
blob = Blob.from_path(self.file_path)
return list(self.parser.parse(blob))
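
# Usage sketch (the file name below is hypothetical; pass any local .vsdx path,
# or an http(s) URL that will first be downloaded to a temporary file):
if __name__ == "__main__":
    loader = VsdxLoader("diagram.vsdx")
    documents = loader.load()
    print(len(documents))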
|
import os
import tempfile
from abc import ABC
from pathlib import Path
from typing import List, Union
from urllib.parse import urlparse
import requests
from langchain_community.docstore.document import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.blob_loaders import Blob
from langchain_community.document_loaders.parsers import VsdxParser
class VsdxLoader(BaseLoader, ABC):
def __init__(self, file_path: Union[str, Path]):
"""Initialize with file path."""
self.file_path = str(file_path)
if "~" in self.file_path:
self.file_path = os.path.expanduser(self.file_path)
# If the file is a web path, download it to a temporary file, and use that
if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path):
r = requests.get(self.file_path)
if r.status_code != 200:
raise ValueError(
"Check the url of your file; returned status code %s"
% r.status_code
)
self.web_path = self.file_path
self.temp_file = tempfile.NamedTemporaryFile()
self.temp_file.write(r.content)
self.file_path = self.temp_file.name
elif not os.path.isfile(self.file_path):
raise ValueError("File path %s is not a valid file or url" % self.file_path)
self.parser = VsdxParser() # type: ignore[misc]
def __del__(self) -> None:
if hasattr(self, "temp_file"):
self.temp_file.close()
@staticmethod
def _is_valid_url(url: str) -> bool:
"""Check if the url is valid."""
parsed = urlparse(url)
return bool(parsed.netloc) and bool(parsed.scheme)
def load(self) -> List[Document]:
blob = Blob.from_path(self.file_path) # type: ignore[attr-defined]
return list(self.parser.parse(blob))
|
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
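
# As the NOTE above mentions, quantization can also happen at encode time via the
# `precision` argument (a sketch; assumes the `precision` parameter of
# SentenceTransformer.encode shipped alongside `quantize_embeddings`). Without
# explicit calibration embeddings, int8/uint8 ranges are derived from the encoded
# batch itself, which is why the script above keeps the full-precision embeddings.
int8_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, precision="int8")
binary_query_embeddings = model.encode(queries, normalize_embeddings=True, precision="binary")
print(int8_corpus_embeddings.dtype, binary_query_embeddings.dtype)  # int8, int8 (binary is bit-packed)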
|
from sentence_transformers import SentenceTransformer
from sentence_transformers.quantization import quantize_embeddings, semantic_search_usearch
from datasets import load_dataset
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train").map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
num_queries = 1_000
queries = corpus[:num_queries]
# 2. Load the model
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
# 3. Encode the corpus
full_corpus_embeddings = model.encode(corpus, normalize_embeddings=True, show_progress_bar=True)
# 4. Encode the queries using the full precision
query_embeddings = model.encode(queries, normalize_embeddings=True)
for exact in (True, False):
for corpus_precision in ("float32", "int8", "binary"):
corpus_embeddings = quantize_embeddings(full_corpus_embeddings, precision=corpus_precision)
# NOTE: We can also pass "precision=..." to the encode method to quantize the embeddings directly,
# but we want to keep the full precision embeddings to act as a calibration dataset for quantizing
# the query embeddings. This is important only if you are using uint8 or int8 precision
# 5. Perform semantic search using usearch
rescore_multiplier = 4
results, search_time = semantic_search_usearch(
query_embeddings,
corpus_embeddings=corpus_embeddings,
corpus_precision=corpus_precision,
top_k=10,
calibration_embeddings=full_corpus_embeddings,
rescore=corpus_precision != "float32",
rescore_multiplier=rescore_multiplier,
exact=exact,
)
print(
f"{'Exact' if exact else 'Approximate'} search time using {corpus_precision} corpus: {search_time:.6f} seconds"
+ (f" (rescore_multiplier: {rescore_multiplier})" if corpus_precision != "float32" else "")
)
|
from typing import Any, Dict, Type, TypeVar
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
T = TypeVar('T', bound='ProtoMixin')
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls: Type[T], pb_msg: 'DocumentProto') -> T:
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if/else chain needs to be refactored: it is too long,
            # and the check should be delegated to the type level
if content_type == 'tensor':
fields[field] = Tensor._read_from_proto(value.tensor)
elif content_type == 'torch_tensor':
fields[field] = TorchTensor._read_from_proto(value.torch_tensor)
elif content_type == 'embedding':
fields[field] = Embedding._read_from_proto(value.embedding)
elif content_type == 'any_url':
fields[field] = parse_obj_as(AnyUrl, value.any_url)
elif content_type == 'image_url':
fields[field] = parse_obj_as(ImageUrl, value.image_url)
elif content_type == 'id':
fields[field] = parse_obj_as(ID, value.id)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
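
# Sketch of the refactor hinted at in the comment above (hypothetical helpers, not
# part of docarray): delegate per-content-type deserialization to a lookup table
# instead of a long if/else chain. Nested documents and chunks are left out here
# because they need the surrounding class for context.
_PROTO_READERS = {
    'tensor': lambda value: Tensor._read_from_proto(value.tensor),
    'torch_tensor': lambda value: TorchTensor._read_from_proto(value.torch_tensor),
    'embedding': lambda value: Embedding._read_from_proto(value.embedding),
    'any_url': lambda value: parse_obj_as(AnyUrl, value.any_url),
    'image_url': lambda value: parse_obj_as(ImageUrl, value.image_url),
    'id': lambda value: parse_obj_as(ID, value.id),
    'text': lambda value: value.text,
}


def _read_simple_field(value: 'NodeProto') -> Any:
    """Deserialize a non-nested field, or raise if the content type is unknown."""
    content_type = value.WhichOneof('content')
    if content_type is None:
        return None
    try:
        return _PROTO_READERS[content_type](value)
    except KeyError:
        raise ValueError(f'type {content_type} is not supported for deserialization')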
|
from typing import Any, Dict
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if/else chain needs to be refactored: it is too long,
            # and the check should be delegated to the type level
if content_type == 'tensor':
fields[field] = Tensor._read_from_proto(value.tensor)
elif content_type == 'torch_tensor':
fields[field] = TorchTensor._read_from_proto(value.torch_tensor)
elif content_type == 'embedding':
fields[field] = Embedding._read_from_proto(value.embedding)
elif content_type == 'any_url':
fields[field] = parse_obj_as(AnyUrl, value.any_url)
elif content_type == 'image_url':
fields[field] = parse_obj_as(ImageUrl, value.image_url)
elif content_type == 'id':
fields[field] = parse_obj_as(ID, value.id)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
import ctypes
from os import environ
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def _find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [
curr_path.parents[1],
curr_path.parents[0] / "bin",
curr_path.parents[0] / "lib",
]
if system() in ("Windows", "Microsoft"):
dll_path.append(curr_path.parents[1] / "Release")
dll_path.append(curr_path.parents[1] / "windows" / "x64" / "DLL")
dll_path = [p / "lib_lightgbm.dll" for p in dll_path]
elif system() == "Darwin":
dll_path = [p / "lib_lightgbm.dylib" for p in dll_path]
else:
dll_path = [p / "lib_lightgbm.so" for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = "\n".join(map(str, dll_path))
raise Exception(f"Cannot find lightgbm library file in following paths:\n{dll_path_joined}")
return lib_path
# we don't need lib_lightgbm while building docs
_LIB: ctypes.CDLL
if environ.get("LIGHTGBM_BUILD_DOC", False):
from unittest.mock import Mock # isort: skip
_LIB = Mock(ctypes.CDLL) # type: ignore
else:
_LIB = ctypes.cdll.LoadLibrary(_find_lib_path()[0])
|
# coding: utf-8
"""Find the path to LightGBM dynamic library files."""
from pathlib import Path
from platform import system
from typing import List
__all__: List[str] = []
def find_lib_path() -> List[str]:
"""Find the path to LightGBM library files.
Returns
-------
lib_path: list of str
List of all found library paths to LightGBM.
"""
curr_path = Path(__file__).absolute()
dll_path = [
curr_path.parents[1],
curr_path.parents[0] / "bin",
curr_path.parents[0] / "lib",
]
if system() in ("Windows", "Microsoft"):
dll_path.append(curr_path.parents[1] / "Release")
dll_path.append(curr_path.parents[1] / "windows" / "x64" / "DLL")
dll_path = [p / "lib_lightgbm.dll" for p in dll_path]
elif system() == "Darwin":
dll_path = [p / "lib_lightgbm.dylib" for p in dll_path]
else:
dll_path = [p / "lib_lightgbm.so" for p in dll_path]
lib_path = [str(p) for p in dll_path if p.is_file()]
if not lib_path:
dll_path_joined = "\n".join(map(str, dll_path))
raise Exception(f"Cannot find lightgbm library file in following paths:\n{dll_path_joined}")
return lib_path
|
import argparse
import urllib
from http import HTTPStatus
from jina.logging.predefined import default_logger
from jina.helper import parse_host_scheme
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(f'{hostname}:{port}', protocol=protocol)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
        # exit with status 1 (anomaly) if execution reaches this point
exit(1)
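
# Usage sketch (hypothetical host and values; the checker exits the process with
# status 0 on success and 1 on failure, mirroring its CLI usage):
if __name__ == '__main__':
    NetworkChecker(
        argparse.Namespace(
            target='flow',
            host='grpc://localhost:12345',
            attempts=3,
            min_successful_attempts=1,
            timeout=3000,
        )
    )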
|
import argparse
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.retries):
with TimeContext(
f'ping {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
r = WorkerRuntime.is_ready(args.host)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, retry (%d/%d) in 1s'
% (j + 1, args.retries)
)
else:
total_success += 1
total_time += tc.duration
time.sleep(1)
if total_success < args.retries:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.retries) * 100,
args.retries - total_success,
args.retries,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
exit(0)
except KeyboardInterrupt:
pass
        # exit with status 1 (anomaly) if execution reaches this point
exit(1)
|
import pytest
from docarray import BaseDocument
from docarray.utils.misc import is_tf_available
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowEmbedding, TensorFlowTensor
@pytest.mark.tensorflow
def test_set_tensorflow_tensor():
class MyDocument(BaseDocument):
t: TensorFlowTensor
doc = MyDocument(t=tf.zeros((3, 224, 224)))
assert isinstance(doc.t, TensorFlowTensor)
assert isinstance(doc.t.tensor, tf.Tensor)
assert tnp.allclose(doc.t.tensor, tf.zeros((3, 224, 224)))
@pytest.mark.tensorflow
def test_set_tf_embedding():
class MyDocument(BaseDocument):
embedding: TensorFlowEmbedding
doc = MyDocument(embedding=tf.zeros((128,)))
assert isinstance(doc.embedding, TensorFlowTensor)
assert isinstance(doc.embedding, TensorFlowEmbedding)
assert isinstance(doc.embedding.tensor, tf.Tensor)
assert tnp.allclose(doc.embedding.tensor, tf.zeros((128,)))
|
import pytest
from docarray import BaseDocument
try:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp # type: ignore
from docarray.typing import TensorFlowTensor
except (ImportError, TypeError):
pass
@pytest.mark.tensorflow
def test_set_tensorflow_tensor():
class MyDocument(BaseDocument):
t: TensorFlowTensor
doc = MyDocument(t=tf.zeros((3, 224, 224)))
assert isinstance(doc.t, TensorFlowTensor)
assert isinstance(doc.t.tensor, tf.Tensor)
assert tnp.allclose(doc.t.tensor, tf.zeros((3, 224, 224)))
|
import pytest
from langchain._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
@pytest.mark.xfail(reason="Unknown reason")
def test_create_json_agent_migration() -> None:
"""Test the migration of create_json_agent from langchain to langchain_community."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain",
to_package="langchain_community",
)
json_agent_migrations = [
migration
for migration in raw_migrations
if "create_json_agent" in migration[0]
]
if json_agent_migrations != [
(
"langchain.agents.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.json.base.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
]:
msg = "json_agent_migrations did not match the expected value"
raise ValueError(msg)
@pytest.mark.xfail(reason="Unknown reason")
def test_create_single_store_retriever_db() -> None:
"""Test migration from langchain to langchain_core."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain",
to_package="langchain_core",
)
# SingleStore was an old name for VectorStoreRetriever
single_store_migration = [
migration for migration in raw_migrations if "SingleStore" in migration[0]
]
if single_store_migration != [
(
"langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
"langchain_core.vectorstores.VectorStoreRetriever",
),
]:
msg = (
"Unexpected migration: single_store_migration does not match expected "
"value"
)
raise ValueError(msg)
|
import pytest
from langchain._api import suppress_langchain_deprecation_warning as sup2
from langchain_core._api import suppress_langchain_deprecation_warning as sup1
from langchain_cli.namespaces.migrate.generate.generic import (
generate_simplified_migrations,
)
@pytest.mark.xfail(reason="Unknown reason")
def test_create_json_agent_migration() -> None:
"""Test the migration of create_json_agent from langchain to langchain_community."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_community"
)
json_agent_migrations = [
migration
for migration in raw_migrations
if "create_json_agent" in migration[0]
]
assert json_agent_migrations == [
(
"langchain.agents.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
(
"langchain.agents.agent_toolkits.json.base.create_json_agent",
"langchain_community.agent_toolkits.create_json_agent",
),
]
@pytest.mark.xfail(reason="Unknown reason")
def test_create_single_store_retriever_db() -> None:
"""Test migration from langchain to langchain_core."""
with sup1(), sup2():
raw_migrations = generate_simplified_migrations(
from_package="langchain", to_package="langchain_core"
)
# SingleStore was an old name for VectorStoreRetriever
single_store_migration = [
migration for migration in raw_migrations if "SingleStore" in migration[0]
]
assert single_store_migration == [
(
"langchain.vectorstores.singlestoredb.SingleStoreDBRetriever",
"langchain_core.vectorstores.VectorStoreRetriever",
),
]
|
"""Hive data reader."""
try:
from pyhive import hive
except ImportError:
raise ImportError("`hive` package not found, please run `pip install pyhive`")
try:
import sqlglot
except ImportError:
raise ImportError("`sqlglot` package not found, please run `pip install sqlglot`")
from typing import List, Optional, Tuple
from deprecated import deprecated
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class InvalidSqlError(Exception):
"""Raise when invalid SQL is passed."""
def _validate_sql_query(statements: List[str]) -> None:
if len(statements) > 1:
raise InvalidSqlError("You cannot pass multiple statements into the query")
if not statements[0].lower().startswith("select"):
raise InvalidSqlError("You must provide a SELECT query")
if "or" in statements[0].lower():
raise InvalidSqlError("The use of OR is not allowed to prevent SQL Injections")
@deprecated(
reason="llama-index-readers-hive has been deprecated since v0.3.1 on the grounds of security concerns for SQL query handling, and will thus no longer be maintained. Use this package with caution.",
version="0.3.1",
)
class HiveReader(BaseReader):
"""
Read documents from a Hive.
These documents can then be used in a downstream Llama Index data structure.
Args:
host : What host HiveServer2 runs on
port : The port Hive Server runs on. Defaults to 10000.
auth : The value of hive.server2.authentication used by HiveServer2.
Defaults to ``NONE``
database: the database name
password: Use with auth='LDAP' or auth='CUSTOM' only
"""
def __init__(
self,
host: str,
port: Optional[int] = None,
database: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
auth: Optional[str] = None,
):
"""Initialize with parameters."""
self.con = hive.Connection(
host=host,
port=port,
username=username,
database=database,
auth=auth,
password=password,
)
def load_data(
self, query: str, params: Optional[Tuple[str, ...]] = None
) -> List[Document]:
"""
Read data from the Hive.
Args:
query (str): Query with which to extract data from Hive. Parametrized values must be represented as '%s'.
            params (Optional[Tuple[str, ...]]): Parametrized values.
Returns:
List[Document]: A list of documents.
"""
try:
if params:
filled_query = query % tuple(repr(p) for p in params)
else:
filled_query = query
parsed_query = sqlglot.parse(filled_query)
statements = [statement.sql() for statement in parsed_query]
_validate_sql_query(statements=statements)
cursor = self.con.cursor()
cursor.execute(operation=query, parameters=params)
rows = cursor.fetchall()
except Exception:
raise Exception(
"Throws Exception in execution, please check your connection params and query."
)
documents: List[Document] = []
for row in rows:
documents.append(Document(text=str(row)))
return documents
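
# Usage sketch (hypothetical host and table; requires a reachable HiveServer2 and
# the packages checked for at import time). Parametrized values are substituted
# into the '%s' placeholders before the query is validated.
if __name__ == "__main__":
    reader = HiveReader(host="localhost", port=10000, database="default")
    docs = reader.load_data(
        "SELECT * FROM my_table WHERE id = %s", params=("42",)
    )
    print(len(docs))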
|
"""Hive data reader."""
from typing import List, Optional
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class HiveReader(BaseReader):
"""
Read documents from a Hive.
These documents can then be used in a downstream Llama Index data structure.
Args:
host : What host HiveServer2 runs on
port : The port Hive Server runs on. Defaults to 10000.
auth : The value of hive.server2.authentication used by HiveServer2.
Defaults to ``NONE``
database: the database name
password: Use with auth='LDAP' or auth='CUSTOM' only
"""
def __init__(
self,
host: str,
port: Optional[int] = None,
database: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
auth: Optional[str] = None,
):
"""Initialize with parameters."""
try:
from pyhive import hive
except ImportError:
raise ImportError(
"`hive` package not found, please run `pip install pyhive`"
)
self.con = hive.Connection(
host=host,
port=port,
username=username,
database=database,
auth=auth,
password=password,
)
def load_data(self, query: str) -> List[Document]:
"""
Read data from the Hive.
Args:
query (str): The query used to query data from Hive
Returns:
List[Document]: A list of documents.
"""
try:
            cursor = self.con.cursor()
            cursor.execute(query)
            rows = cursor.fetchall()
        except Exception:
            raise Exception(
                "Exception raised during query execution; please check your connection params and query."
            )
        documents = []
        for row in rows:
            documents.append(Document(text=str(row)))
        return documents
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
disabledInCI,
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"disabledInCI",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from .autograd_utils import use_deterministic_algorithms
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoAudioDevice,
skipIfNoCtcDecoder,
skipIfNoCuCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoHWAccel,
skipIfNoKaldi,
skipIfNoMacOS,
skipIfNoModule,
skipIfNoQengine,
skipIfNoRIR,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoAudioDevice",
"skipIfNoCtcDecoder",
"skipIfNoCuCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoMacOS",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoRIR",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfNoHWAccel",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"use_deterministic_algorithms",
"zip_equal",
]
|
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
class MaskingTest(testing.TestCase):
def test_mask_on_eager_tensor(self):
x = ops.zeros((2, 3))
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
mask = ops.ones((2, 3))
set_keras_mask(x, mask)
self.assertIs(get_keras_mask(x), mask)
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
def test_mask_on_tracer_tensor(self):
def fn(x):
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
mask = ops.ones((2, 3))
set_keras_mask(x, mask)
self.assertIs(get_keras_mask(x), mask)
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None) # key is now deleted, should be a no-op
self.assertIsNone(get_keras_mask(x))
backend.compute_output_spec(fn, backend.KerasTensor((2, 3)))
|
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
class MaskingTest(testing.TestCase):
def test_mask_on_eager_tensor(self):
x = ops.zeros((2, 3))
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
mask = ops.ones((2, 3))
set_keras_mask(x, mask)
self.assertIs(get_keras_mask(x), mask)
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
def test_mask_on_tracer_tensor(self):
def fn(x):
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
mask = ops.ones((2, 3))
set_keras_mask(x, mask)
self.assertIs(get_keras_mask(x), mask)
set_keras_mask(x, None)
self.assertIsNone(get_keras_mask(x))
set_keras_mask(x, None) # key is now deleted, should be a no-op
self.assertIsNone(get_keras_mask(x))
backend.compute_output_spec(fn, backend.KerasTensor((2, 3)))
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
openvino_skipped_tests = []
if backend() == "openvino":
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
        # strip whitespace from each line and drop any lines that
        # are empty after stripping
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() in ["numpy", "openvino"],
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also, skip concrete tests for openvino, listed in the special file
            # this is a more granular mechanism for excluding tests than
            # the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
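
# Sketch of how the marker registered above is used from a test module
# (hypothetical test; conftest.py itself is not collected as a test module):
@pytest.mark.requires_trainable_backend
def test_something_that_needs_fit():
    assert backend() not in ("numpy", "openvino")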
|
import os
# When using jax.experimental.enable_x64 in unit test, we want to keep the
# default dtype with 32 bits, aligning it with Keras's default.
os.environ["JAX_DEFAULT_DTYPE_BITS"] = "32"
try:
# When using torch and tensorflow, torch needs to be imported first,
# otherwise it will segfault upon import. This should force the torch
# import to happen first for all tests.
import torch # noqa: F401
except ImportError:
pass
import pytest # noqa: E402
from keras.src.backend import backend # noqa: E402
def pytest_configure(config):
config.addinivalue_line(
"markers",
"requires_trainable_backend: mark test for trainable backend only",
)
def pytest_collection_modifyitems(config, items):
openvino_skipped_tests = []
if backend() == "openvino":
with open(
"keras/src/backend/openvino/excluded_concrete_tests.txt", "r"
) as file:
openvino_skipped_tests = file.readlines()
        # strip whitespace from each line and drop any lines that
        # are empty after stripping
openvino_skipped_tests = [
line.strip() for line in openvino_skipped_tests if line.strip()
]
requires_trainable_backend = pytest.mark.skipif(
backend() == "numpy" or backend() == "openvino",
reason="Trainer not implemented for NumPy and OpenVINO backend.",
)
for item in items:
if "requires_trainable_backend" in item.keywords:
item.add_marker(requires_trainable_backend)
# also, skip concrete tests for openvino, listed in the special file
            # this is a more granular mechanism for excluding tests than
            # the --ignore option
for skipped_test in openvino_skipped_tests:
if skipped_test in item.nodeid:
item.add_marker(
skip_if_backend(
"openvino",
"Not supported operation by openvino backend",
)
)
def skip_if_backend(given_backend, reason):
return pytest.mark.skipif(backend() == given_backend, reason=reason)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# model settings
model = dict(
type='YOLOV3',
backbone=dict(
type='Darknet',
depth=53,
out_indices=(3, 4, 5),
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')),
neck=dict(
type='YOLOV3Neck',
num_scales=3,
in_channels=[1024, 512, 256],
out_channels=[512, 256, 128]),
bbox_head=dict(
type='YOLOV3Head',
num_classes=80,
in_channels=[512, 256, 128],
out_channels=[1024, 512, 256],
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
reduction='sum'),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=2.0,
reduction='sum'),
loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0)),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 2)),
dict(
type='MinIoURandomCrop',
min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608, 608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
warmup_ratio=0.1,
step=[218, 246])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=273)
evaluation = dict(interval=1, metric=['bbox'])
|
"""langchain-core version information and utilities."""
VERSION = "0.3.65"
|
"""langchain-core version information and utilities."""
VERSION = "0.3.64"
|
import json
import os
from typing import List
import torch
from torch import nn
class LSTM(nn.Module):
"""Bidirectional LSTM running over word embeddings."""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
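
# Usage sketch (illustrative shapes): run the module over a batch of padded
# word embeddings together with the per-sentence lengths it expects.
if __name__ == "__main__":
    lstm = LSTM(word_embedding_dimension=300, hidden_dim=512)
    features = {
        "token_embeddings": torch.randn(2, 10, 300),
        "sentence_lengths": torch.tensor([10, 7]),
    }
    out = lstm(features)
    # bidirectional=True doubles the output dimension: torch.Size([2, 10, 1024])
    print(out["token_embeddings"].shape)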
|
import torch
from torch import nn
from typing import List
import os
import json
class LSTM(nn.Module):
"""
Bidirectional LSTM running over word embeddings.
"""
def __init__(
self,
word_embedding_dimension: int,
hidden_dim: int,
num_layers: int = 1,
dropout: float = 0,
bidirectional: bool = True,
):
nn.Module.__init__(self)
self.config_keys = ["word_embedding_dimension", "hidden_dim", "num_layers", "dropout", "bidirectional"]
self.word_embedding_dimension = word_embedding_dimension
self.hidden_dim = hidden_dim
self.num_layers = num_layers
self.dropout = dropout
self.bidirectional = bidirectional
self.embeddings_dimension = hidden_dim
if self.bidirectional:
self.embeddings_dimension *= 2
self.encoder = nn.LSTM(
word_embedding_dimension,
hidden_dim,
num_layers=num_layers,
dropout=dropout,
bidirectional=bidirectional,
batch_first=True,
)
def forward(self, features):
token_embeddings = features["token_embeddings"]
sentence_lengths = torch.clamp(features["sentence_lengths"], min=1)
packed = nn.utils.rnn.pack_padded_sequence(
token_embeddings, sentence_lengths.cpu(), batch_first=True, enforce_sorted=False
)
packed = self.encoder(packed)
unpack = nn.utils.rnn.pad_packed_sequence(packed[0], batch_first=True)[0]
features.update({"token_embeddings": unpack})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "lstm_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "lstm_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"))
model = LSTM(**config)
model.load_state_dict(weights)
return model
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides with more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = (
"LLM",
"BaseChatModel",
"BaseLLM",
"BaseLanguageModel",
"FakeListChatModel",
"FakeListLLM",
"FakeMessagesListChatModel",
"FakeStreamingListLLM",
"GenericFakeChatModel",
"LangSmithParams",
"LanguageModelInput",
"LanguageModelLike",
"LanguageModelOutput",
"ParrotFakeChatModel",
"SimpleChatModel",
"get_tokenizer",
)
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
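
# A minimal sketch of the custom Chat Model pattern described in the docstring
# above (the class and its behavior are hypothetical; streaming, async, and
# callback handling are omitted):
from typing import Any, Optional

from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class EchoChatModel(BaseChatModel):
    """Toy chat model that echoes the last input message back."""

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: Optional[list[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        reply = AIMessage(content=messages[-1].content)
        return ChatResult(generations=[ChatGeneration(message=reply)])

    @property
    def _llm_type(self) -> str:
        return "echo-chat-model"


# EchoChatModel().invoke("hello").content == "hello"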
|
"""Language models.
**Language Model** is a type of model that can generate text or complete
text prompts.
LangChain has two main classes to work with language models: **Chat Models**
and "old-fashioned" **LLMs**.
**Chat Models**
Language models that use a sequence of messages as inputs and return chat messages
as outputs (as opposed to using plain text). These are traditionally newer models (
older models are generally LLMs, see below). Chat models support the assignment of
distinct roles to conversation messages, helping to distinguish messages from the AI,
users, and instructions such as system messages.
The key abstraction for chat models is `BaseChatModel`. Implementations
should inherit from this class. Please see LangChain how-to guides for more
information on how to implement a custom chat model.
To implement a custom Chat Model, inherit from `BaseChatModel`. See
the following guide for more information on how to implement a custom Chat Model:
https://python.langchain.com/docs/how_to/custom_chat_model/
**LLMs**
Language models that take a string as input and return a string.
These are traditionally older models (newer models are generally Chat Models, see above).
Although the underlying models are string in, string out, the LangChain wrappers
also allow these models to take messages as input. This gives them the same interface
as Chat Models. When messages are passed in as input, they will be formatted into a
string under the hood before being passed to the underlying model.
To implement a custom LLM, inherit from `BaseLLM` or `LLM`.
Please see the following guide for more information on how to implement a custom LLM:
https://python.langchain.com/docs/how_to/custom_llm/
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.language_models.base import (
BaseLanguageModel,
LangSmithParams,
LanguageModelInput,
LanguageModelLike,
LanguageModelOutput,
get_tokenizer,
)
from langchain_core.language_models.chat_models import (
BaseChatModel,
SimpleChatModel,
)
from langchain_core.language_models.fake import FakeListLLM, FakeStreamingListLLM
from langchain_core.language_models.fake_chat_models import (
FakeListChatModel,
FakeMessagesListChatModel,
GenericFakeChatModel,
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
__all__ = (
"BaseLanguageModel",
"BaseChatModel",
"SimpleChatModel",
"BaseLLM",
"LLM",
"LanguageModelInput",
"get_tokenizer",
"LangSmithParams",
"LanguageModelOutput",
"LanguageModelLike",
"FakeListLLM",
"FakeStreamingListLLM",
"FakeListChatModel",
"FakeMessagesListChatModel",
"GenericFakeChatModel",
"ParrotFakeChatModel",
)
_dynamic_imports = {
"BaseLanguageModel": "base",
"LangSmithParams": "base",
"LanguageModelInput": "base",
"LanguageModelLike": "base",
"LanguageModelOutput": "base",
"get_tokenizer": "base",
"BaseChatModel": "chat_models",
"SimpleChatModel": "chat_models",
"FakeListLLM": "fake",
"FakeStreamingListLLM": "fake",
"FakeListChatModel": "fake_chat_models",
"FakeMessagesListChatModel": "fake_chat_models",
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"BaseLLM": "llms",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import List
import numpy as np
import pytest
from jina import Document, DocumentArray, Flow
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize(
'arr_in',
[
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8)),
],
)
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test', inputs=[Document(blob=arr_in)], return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512,)
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(
Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)
),
return_results=True,
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(
pytest.lazy_fixture('docs_with_chunk_blobs'),
[['r', 0], ['c', 11], ['cc', 0]],
'c',
),
(
pytest.lazy_fixture('docs_with_chunk_chunk_blobs'),
[['r', 0], ['c', 0], ['cc', 11]],
'cc',
),
],
)
def test_traversal_paths(
docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str
):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
embeddings = (
DocumentArray(res[0].docs)
.traverse_flat([path])
.get_attributes('embedding')
)
return len([em for em in embeddings if em is not None]) == count
return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True,
)
assert validate_traversal(docs_per_path)(resp)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8))
])
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=[Document(blob=arr_in)],
return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512, )
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)),
return_results=True
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 11], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 11]], 'cc')
]
)
def test_traversal_paths(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: str):
def validate_traversal(expected_docs_per_path: List[List[str]]):
def validate(res):
for path, count in expected_docs_per_path:
return len(DocumentArray(res[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
from __future__ import annotations
from .model_card import SparseEncoderModelCardData
from .SparseEncoder import SparseEncoder
from .trainer import SparseEncoderTrainer
from .training_args import SparseEncoderTrainingArguments
__all__ = [
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
]
# TODO: Add tests for all the components
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.callbacks.splade_callbacks import (
SchedulerType,
SpladeLambdaSchedulerCallback,
)
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.model_card import SparseEncoderModelCardData
from sentence_transformers.sparse_encoder.models import IDF, CSRSparsity, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
"IDF",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Callbacks
"SpladeLambdaSchedulerCallback",
"SchedulerType",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
# Model card
"SparseEncoderModelCardData",
]
# TODO: Add tests for all the components
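# Usage sketch (assumptions: the model name below is illustrative and SparseEncoder
# follows the familiar encode() API of SentenceTransformer; not a verbatim recipe):
#
#     from sentence_transformers.sparse_encoder import SparseEncoder
#
#     model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
#     embeddings = model.encode(["How do sparse encoders work?"])
#     # embeddings are high-dimensional, mostly-zero vectors; downstream code can
#     # feed them to the Sparse* evaluators or losses exported above.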
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmengine.config import Config, DictAction
from mmengine.fileio import load
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
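# Example invocations (config and result paths are illustrative placeholders):
#
#     # evaluate COCO-style metrics from a pickled result file
#     python eval_metric.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \
#         results.pkl --eval bbox segm
#
#     # only reformat the results for test-server submission, no evaluation
#     python eval_metric.py configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \
#         results.pkl --format-only --eval-options jsonfile_prefix=./mask_rcnn_results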
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
from enum import Enum
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception("You must pass a `byte_store` parameter.")
values["docstore"] = docstore
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> list[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> list[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query, **self.search_kwargs
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
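# Construction sketch (assumptions: `my_embeddings` is a hypothetical Embeddings
# instance; any concrete VectorStore works the same way). Passing `byte_store`
# lets shim_docstore build the parent-document docstore automatically:
#
#     from langchain_core.stores import InMemoryByteStore
#     from langchain_core.vectorstores import InMemoryVectorStore
#
#     retriever = MultiVectorRetriever(
#         vectorstore=InMemoryVectorStore(embedding=my_embeddings),
#         byte_store=InMemoryByteStore(),
#         id_key="doc_id",
#         search_kwargs={"k": 4},
#     )
#     # Index small chunks in the vectorstore with metadata {"doc_id": <parent id>}
#     # and store the full parent documents in retriever.docstore under those ids;
#     # retriever.invoke("some query") then returns the deduplicated parent documents.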
|
from enum import Enum
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.stores import BaseStore, ByteStore
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.storage._lc_store import create_kv_docstore
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
similarity_score_threshold = "similarity_score_threshold"
"""Similarity search with a score threshold."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class MultiVectorRetriever(BaseRetriever):
"""Retrieve from a set of multiple embeddings for the same document."""
vectorstore: VectorStore
"""The underlying vectorstore to use to store small chunks
and their embedding vectors"""
byte_store: Optional[ByteStore] = None
"""The lower-level backing storage layer for the parent documents"""
docstore: BaseStore[str, Document]
"""The storage interface for the parent documents"""
id_key: str = "doc_id"
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
@model_validator(mode="before")
@classmethod
def shim_docstore(cls, values: Dict) -> Any:
byte_store = values.get("byte_store")
docstore = values.get("docstore")
if byte_store is not None:
docstore = create_kv_docstore(byte_store)
elif docstore is None:
raise Exception("You must pass a `byte_store` parameter.")
values["docstore"] = docstore
return values
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = self.vectorstore.similarity_search(query, **self.search_kwargs)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = self.docstore.mget(ids)
return [d for d in docs if d is not None]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
"""Asynchronously get documents relevant to a query.
Args:
query: String to find relevant documents for
run_manager: The callbacks handler to use
Returns:
List of relevant documents
"""
if self.search_type == SearchType.mmr:
sub_docs = await self.vectorstore.amax_marginal_relevance_search(
query, **self.search_kwargs
)
elif self.search_type == SearchType.similarity_score_threshold:
sub_docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **self.search_kwargs
)
)
sub_docs = [sub_doc for sub_doc, _ in sub_docs_and_similarities]
else:
sub_docs = await self.vectorstore.asimilarity_search(
query, **self.search_kwargs
)
# We do this to maintain the order of the ids that are returned
ids = []
for d in sub_docs:
if self.id_key in d.metadata and d.metadata[self.id_key] not in ids:
ids.append(d.metadata[self.id_key])
docs = await self.docstore.amget(ids)
return [d for d in docs if d is not None]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
from typing import Optional
from docarray.document import BaseDocument
from docarray.typing import AnyTensor, Embedding, PointCloud3DUrl
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
Point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed size ndarray (shape=(n_samples, 3)) and
hence easier for deep learning algorithms to handle.
    A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an Embedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray import PointCloud3D
from docarray.typing import Embedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[Embedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument, PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[Embedding]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.servers import BaseServer
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
from tests.helper import _generate_pod_args
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = [port]
args.name = name
if executor:
args.uses = executor
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
),
req_handler_cls=GatewayRequestHandler,
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
import multiprocessing
import pytest
from jina import Client
from jina.parsers import set_gateway_parser
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.servers import BaseServer
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
from jina.serve.runtimes.gateway.request_handling import GatewayRequestHandler
from tests.helper import _generate_pod_args
def _create_worker_runtime(port, name='', executor=None):
args = _generate_pod_args()
args.port = [port]
args.name = name
if executor:
args.uses = executor
with AsyncNewLoopRuntime(args, req_handler_cls=WorkerRequestHandler) as runtime:
runtime.run_forever()
def _create_gateway_runtime(graph_description, pod_addresses, port, protocol='grpc'):
with AsyncNewLoopRuntime(
set_gateway_parser().parse_args(
[
'--graph-description',
graph_description,
'--deployments-addresses',
pod_addresses,
'--port',
str(port),
'--protocol',
protocol,
]
), req_handler_cls=GatewayRequestHandler
) as runtime:
runtime.run_forever()
def _setup(worker_port, port, protocol):
graph_description = '{"start-gateway": ["pod0"], "pod0": ["end-gateway"]}'
pod_addresses = f'{{"pod0": ["0.0.0.0:{worker_port}"]}}'
# create a single worker runtime
worker_process = multiprocessing.Process(
target=_create_worker_runtime, args=(worker_port,)
)
worker_process.start()
# create a single gateway runtime
gateway_process = multiprocessing.Process(
target=_create_gateway_runtime,
args=(graph_description, pod_addresses, port, protocol),
)
gateway_process.start()
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{worker_port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
BaseServer.wait_for_ready_or_shutdown(
timeout=5.0,
ctrl_address=f'0.0.0.0:{port}',
ready_or_shutdown_event=multiprocessing.Event(),
)
return worker_process, gateway_process
@pytest.mark.parametrize('protocol', ['http'])
def test_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', port=port, protocol=protocol)
dry_run_alive = c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
async def test_async_dry_run_of_flow(port_generator, protocol):
worker_port = port_generator()
port = port_generator()
worker_process, gateway_process = _setup(worker_port, port, protocol)
# send requests to the gateway
c = Client(host='localhost', asyncio=True, port=port, protocol=protocol)
dry_run_alive = await c.is_flow_ready()
# _teardown(worker_process, gateway_process, dry_run_alive)
worker_process.terminate()
worker_process.join()
dry_run_worker_removed = await c.is_flow_ready()
gateway_process.terminate()
gateway_process.join()
assert dry_run_alive
assert not dry_run_worker_removed
assert gateway_process.exitcode == 0
assert worker_process.exitcode == 0
|
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
chunk = s.pop_chunks()[0]
if chunk is None:
raise RuntimeError("Failed to decode audio.")
assert chunk is not None
waveform = chunk[0]
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: BinaryIO,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
from typing import BinaryIO, Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
if sinfo[5] == 0:
waveform, _ = _load_audio(s)
num_frames = waveform.size(1)
else:
num_frames = sinfo[5]
return AudioMetaData(
int(sinfo[8]),
num_frames,
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
buffer_size: int = 4096,
) -> AudioMetaData:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: need to comply with TorchScript syntax -- need annotation and no f-string nor global
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: BinaryIO,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
s = torchaudio.lib._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, buffer_size)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formatting import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
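# Composition sketch (sizes and normalization values are illustrative): the
# exported transforms are typically chained with Compose, either as instances or
# as config dicts expanded by the dataset builder.
#
#     pipeline = Compose([
#         LoadImageFromFile(),
#         LoadAnnotations(with_bbox=True),
#         Resize(img_scale=(1333, 800), keep_ratio=True),
#         RandomFlip(flip_ratio=0.5),
#         Normalize(mean=[123.675, 116.28, 103.53],
#                   std=[58.395, 57.12, 57.375], to_rgb=True),
#         Pad(size_divisor=32),
#         DefaultFormatBundle(),
#         Collect(keys=['img', 'gt_bboxes', 'gt_labels']),
#     ])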
|
# Copyright (c) OpenMMLab. All rights reserved.
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, MixUp, Mosaic,
Normalize, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomFlip,
RandomShift, Resize, SegRescale, YOLOXHSVRandomAug)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate', 'RandomShift', 'Mosaic', 'MixUp',
'RandomAffine', 'YOLOXHSVRandomAug'
]
|
"""
LexRank implementation
Source: https://github.com/crabcamp/lexrank/tree/dev
"""
import logging
import numpy as np
from scipy.sparse.csgraph import connected_components
from scipy.special import softmax
logger = logging.getLogger(__name__)
def degree_centrality_scores(
similarity_matrix,
threshold=None,
increase_power=True,
):
if not (threshold is None or isinstance(threshold, float) and 0 <= threshold < 1):
raise ValueError(
"'threshold' should be a floating-point number " "from the interval [0, 1) or None",
)
if threshold is None:
markov_matrix = create_markov_matrix(similarity_matrix)
else:
markov_matrix = create_markov_matrix_discrete(
similarity_matrix,
threshold,
)
scores = stationary_distribution(
markov_matrix,
increase_power=increase_power,
normalized=False,
)
return scores
def _power_method(transition_matrix, increase_power=True, max_iter=10000):
eigenvector = np.ones(len(transition_matrix))
if len(eigenvector) == 1:
return eigenvector
transition = transition_matrix.transpose()
for _ in range(max_iter):
eigenvector_next = np.dot(transition, eigenvector)
if np.allclose(eigenvector_next, eigenvector):
return eigenvector_next
eigenvector = eigenvector_next
if increase_power:
transition = np.dot(transition, transition)
logger.warning("Maximum number of iterations for power method exceeded without convergence!")
return eigenvector_next
def connected_nodes(matrix):
_, labels = connected_components(matrix)
groups = []
for tag in np.unique(labels):
group = np.where(labels == tag)[0]
groups.append(group)
return groups
def create_markov_matrix(weights_matrix):
n_1, n_2 = weights_matrix.shape
if n_1 != n_2:
raise ValueError("'weights_matrix' should be square")
row_sum = weights_matrix.sum(axis=1, keepdims=True)
# normalize probability distribution differently if we have negative transition values
if np.min(weights_matrix) <= 0:
return softmax(weights_matrix, axis=1)
return weights_matrix / row_sum
def create_markov_matrix_discrete(weights_matrix, threshold):
discrete_weights_matrix = np.zeros(weights_matrix.shape)
ixs = np.where(weights_matrix >= threshold)
discrete_weights_matrix[ixs] = 1
return create_markov_matrix(discrete_weights_matrix)
def stationary_distribution(
transition_matrix,
increase_power=True,
normalized=True,
):
n_1, n_2 = transition_matrix.shape
if n_1 != n_2:
raise ValueError("'transition_matrix' should be square")
distribution = np.zeros(n_1)
grouped_indices = connected_nodes(transition_matrix)
for group in grouped_indices:
t_matrix = transition_matrix[np.ix_(group, group)]
eigenvector = _power_method(t_matrix, increase_power=increase_power)
distribution[group] = eigenvector
if normalized:
distribution /= n_1
return distribution
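if __name__ == "__main__":
    # Small self-contained demo (values are illustrative): rank three "sentences"
    # by degree centrality given a pairwise similarity matrix.
    similarity = np.array(
        [
            [1.0, 0.8, 0.1],
            [0.8, 1.0, 0.2],
            [0.1, 0.2, 1.0],
        ]
    )
    scores = degree_centrality_scores(similarity, threshold=None)
    print("centrality scores:", scores)
    print("most central sentence index:", int(np.argmax(scores)))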
|
"""
LexRank implementation
Source: https://github.com/crabcamp/lexrank/tree/dev
"""
import numpy as np
from scipy.sparse.csgraph import connected_components
from scipy.special import softmax
import logging
logger = logging.getLogger(__name__)
def degree_centrality_scores(
similarity_matrix,
threshold=None,
increase_power=True,
):
if not (
threshold is None
or isinstance(threshold, float)
and 0 <= threshold < 1
):
raise ValueError(
'\'threshold\' should be a floating-point number '
'from the interval [0, 1) or None',
)
if threshold is None:
markov_matrix = create_markov_matrix(similarity_matrix)
else:
markov_matrix = create_markov_matrix_discrete(
similarity_matrix,
threshold,
)
scores = stationary_distribution(
markov_matrix,
increase_power=increase_power,
normalized=False,
)
return scores
def _power_method(transition_matrix, increase_power=True, max_iter=10000):
eigenvector = np.ones(len(transition_matrix))
if len(eigenvector) == 1:
return eigenvector
transition = transition_matrix.transpose()
for _ in range(max_iter):
eigenvector_next = np.dot(transition, eigenvector)
if np.allclose(eigenvector_next, eigenvector):
return eigenvector_next
eigenvector = eigenvector_next
if increase_power:
transition = np.dot(transition, transition)
logger.warning("Maximum number of iterations for power method exceeded without convergence!")
return eigenvector_next
def connected_nodes(matrix):
_, labels = connected_components(matrix)
groups = []
for tag in np.unique(labels):
group = np.where(labels == tag)[0]
groups.append(group)
return groups
def create_markov_matrix(weights_matrix):
n_1, n_2 = weights_matrix.shape
if n_1 != n_2:
raise ValueError('\'weights_matrix\' should be square')
row_sum = weights_matrix.sum(axis=1, keepdims=True)
# normalize probability distribution differently if we have negative transition values
if np.min(weights_matrix) <= 0:
return softmax(weights_matrix, axis=1)
return weights_matrix / row_sum
def create_markov_matrix_discrete(weights_matrix, threshold):
discrete_weights_matrix = np.zeros(weights_matrix.shape)
ixs = np.where(weights_matrix >= threshold)
discrete_weights_matrix[ixs] = 1
return create_markov_matrix(discrete_weights_matrix)
def stationary_distribution(
transition_matrix,
increase_power=True,
normalized=True,
):
n_1, n_2 = transition_matrix.shape
if n_1 != n_2:
raise ValueError('\'transition_matrix\' should be square')
distribution = np.zeros(n_1)
grouped_indices = connected_nodes(transition_matrix)
for group in grouped_indices:
t_matrix = transition_matrix[np.ix_(group, group)]
eigenvector = _power_method(t_matrix, increase_power=increase_power)
distribution[group] = eigenvector
if normalized:
distribution /= n_1
return distribution
|
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* :cite:`collobert2016wav2letter`.
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
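if __name__ == "__main__":
    # Minimal shape check (sizes are illustrative): raw-waveform input with one
    # feature channel; the output is per-frame log-probabilities over num_classes.
    import torch

    model = Wav2Letter(num_classes=40, input_type="waveform", num_features=1)
    waveform = torch.randn(4, 1, 16000)  # (batch, num_features, input_length)
    log_probs = model(waveform)
    print(log_probs.shape)  # (batch, num_classes, output_length)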
|
from torch import nn, Tensor
__all__ = [
"Wav2Letter",
]
class Wav2Letter(nn.Module):
r"""Wav2Letter model architecture from *Wav2Letter: an End-to-End ConvNet-based Speech
Recognition System* [:footcite:`collobert2016wav2letter`].
:math:`\text{padding} = \frac{\text{ceil}(\text{kernel} - \text{stride})}{2}`
Args:
num_classes (int, optional): Number of classes to be classified. (Default: ``40``)
input_type (str, optional): Wav2Letter can use as input: ``waveform``, ``power_spectrum``
or ``mfcc`` (Default: ``waveform``).
num_features (int, optional): Number of input features that the network will receive (Default: ``1``).
"""
def __init__(self, num_classes: int = 40, input_type: str = "waveform", num_features: int = 1) -> None:
super(Wav2Letter, self).__init__()
acoustic_num_features = 250 if input_type == "waveform" else num_features
acoustic_model = nn.Sequential(
nn.Conv1d(in_channels=acoustic_num_features, out_channels=250, kernel_size=48, stride=2, padding=23),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=250, kernel_size=7, stride=1, padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=250, out_channels=2000, kernel_size=32, stride=1, padding=16),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=2000, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
nn.Conv1d(in_channels=2000, out_channels=num_classes, kernel_size=1, stride=1, padding=0),
nn.ReLU(inplace=True),
)
if input_type == "waveform":
waveform_model = nn.Sequential(
nn.Conv1d(in_channels=num_features, out_channels=250, kernel_size=250, stride=160, padding=45),
nn.ReLU(inplace=True),
)
self.acoustic_model = nn.Sequential(waveform_model, acoustic_model)
if input_type in ["power_spectrum", "mfcc"]:
self.acoustic_model = acoustic_model
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (torch.Tensor): Tensor of dimension (batch_size, num_features, input_length).
Returns:
Tensor: Predictor tensor of dimension (batch_size, number_of_classes, input_length).
"""
x = self.acoustic_model(x)
x = nn.functional.log_softmax(x, dim=1)
return x
|
from typing import Optional
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
sliding_window: Optional[int] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`flash_attention_2` does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
# This is before the transpose
seq_len = query.shape[2]
if any(dim == 0 for dim in query.shape):
raise ValueError(
"Tensor query has shape with a zero dimension.\n"
"FlashAttention does not support inputs with dim=0.\n"
"Please check your input shapes or use SDPA instead."
)
# FA2 uses non-transposed inputs
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
    # In PEFT, we usually cast the layer norms to float32 for training stability,
    # so the input hidden states get silently cast to float32. Hence, we need to
    # cast them back to the correct dtype just to be sure everything works as expected.
    # This might slow down training & inference, so it is recommended not to cast the
    # LayerNorms to fp32 (usually our RMSNorm modules handle it correctly).
target_dtype = None
if query.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(module.config, "_pre_quantization_dtype"):
target_dtype = module.config._pre_quantization_dtype
else:
target_dtype = next(layer for layer in module.modules() if isinstance(layer, torch.nn.Linear)).weight.dtype
# FA2 always relies on the value set in the module, so remove it if present in kwargs to avoid passing it twice
kwargs.pop("is_causal", None)
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_length=seq_len,
is_causal=module.is_causal,
dropout=dropout,
softmax_scale=scaling,
sliding_window=sliding_window,
softcap=softcap,
use_top_left_mask=_use_top_left_mask,
target_dtype=target_dtype,
**kwargs,
)
return attn_output, None
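# Shape convention sketch (illustrative; `module` is assumed to be the attention
# layer instance exposing `is_causal` and, for quantized models, `config`):
#
#     # query, key, value: (batch, num_heads, seq_len, head_dim) -- pre-transpose layout
#     # attention_mask:    (batch, seq_len) padding mask, or None for full attention
#     attn_output, _ = flash_attention_forward(
#         module, query, key, value, attention_mask, dropout=0.0, scaling=None
#     )
#     # attn_output: (batch, seq_len, num_heads, head_dim)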
|
from typing import Optional
import torch
from ..modeling_flash_attention_utils import _flash_attention_forward, flash_attn_supports_top_left_mask
from ..utils import logging
logger = logging.get_logger(__name__)
_use_top_left_mask = flash_attn_supports_top_left_mask()
def flash_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
sliding_window: Optional[int] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, None]:
if kwargs.get("output_attentions", False) or kwargs.get("head_mask", None) is not None:
logger.warning_once(
"`flash_attention_2` does not support `output_attentions=True` or `head_mask`."
" Please set your attention to `eager` if you want any of these features."
)
# This is before the transpose
seq_len = query.shape[2]
# FA2 uses non-transposed inputs
query = query.transpose(1, 2)
key = key.transpose(1, 2)
value = value.transpose(1, 2)
    # In PEFT, we usually cast the layer norms to float32 for training stability,
    # so the input hidden states get silently cast to float32. Hence, we need to
    # cast them back to the correct dtype just to be sure everything works as expected.
    # This might slow down training & inference, so it is recommended not to cast the
    # LayerNorms to fp32 (usually our RMSNorm modules handle it correctly).
target_dtype = None
if query.dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(module.config, "_pre_quantization_dtype"):
target_dtype = module.config._pre_quantization_dtype
else:
target_dtype = next(layer for layer in module.modules() if isinstance(layer, torch.nn.Linear)).weight.dtype
# FA2 always relies on the value set in the module, so remove it if present in kwargs to avoid passing it twice
kwargs.pop("is_causal", None)
attn_output = _flash_attention_forward(
query,
key,
value,
attention_mask,
query_length=seq_len,
is_causal=module.is_causal,
dropout=dropout,
softmax_scale=scaling,
sliding_window=sliding_window,
softcap=softcap,
use_top_left_mask=_use_top_left_mask,
target_dtype=target_dtype,
**kwargs,
)
return attn_output, None
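if __name__ == "__main__":
    # Illustrative sketch only (not part of the integration): attention modules hand this
    # function tensors of shape (batch, num_heads, seq_len, head_dim); the flash-attention
    # kernels expect (batch, seq_len, num_heads, head_dim), which is why `seq_len` is read
    # from dim 2 before the transpose above. Shapes here are hypothetical.
    q = torch.randn(2, 8, 16, 64)
    assert q.shape[2] == 16  # seq_len, read before the transpose
    assert q.transpose(1, 2).shape == (2, 16, 8, 64)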
|
import logging
from typing import Literal
from github import Github
from github.PullRequestReview import PullRequestReview
from pydantic import BaseModel, SecretStr
from pydantic_settings import BaseSettings
class LabelSettings(BaseModel):
await_label: str | None = None
number: int
default_config = {"approved-2": LabelSettings(await_label="awaiting-review", number=2)}
class Settings(BaseSettings):
github_repository: str
token: SecretStr
debug: bool | None = False
config: dict[str, LabelSettings] | Literal[""] = default_config
settings = Settings()
if settings.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.debug(f"Using config: {settings.model_dump_json()}")
g = Github(settings.token.get_secret_value())
repo = g.get_repo(settings.github_repository)
for pr in repo.get_pulls(state="open"):
logging.info(f"Checking PR: #{pr.number}")
pr_labels = list(pr.get_labels())
pr_label_by_name = {label.name: label for label in pr_labels}
reviews = list(pr.get_reviews())
review_by_user: dict[str, PullRequestReview] = {}
for review in reviews:
if review.user.login in review_by_user:
stored_review = review_by_user[review.user.login]
if review.submitted_at >= stored_review.submitted_at:
review_by_user[review.user.login] = review
else:
review_by_user[review.user.login] = review
approved_reviews = [
review for review in review_by_user.values() if review.state == "APPROVED"
]
config = settings.config or default_config
for approved_label, conf in config.items():
logging.debug(f"Processing config: {conf.model_dump_json()}")
if conf.await_label is None or (conf.await_label in pr_label_by_name):
logging.debug(f"Processable PR: {pr.number}")
if len(approved_reviews) >= conf.number:
logging.info(f"Adding label to PR: {pr.number}")
pr.add_to_labels(approved_label)
if conf.await_label:
logging.info(f"Removing label from PR: {pr.number}")
pr.remove_from_labels(conf.await_label)
logging.info("Finished")
|
import logging
from typing import Literal
from github import Github
from github.PullRequestReview import PullRequestReview
from pydantic import BaseModel, SecretStr
from pydantic_settings import BaseSettings
class LabelSettings(BaseModel):
await_label: str | None = None
number: int
default_config = {"approved-2": LabelSettings(await_label="awaiting-review", number=2)}
class Settings(BaseSettings):
github_repository: str
token: SecretStr
debug: bool | None = False
config: dict[str, LabelSettings] | Literal[""] = default_config
settings = Settings()
if settings.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logging.debug(f"Using config: {settings.json()}")
g = Github(settings.token.get_secret_value())
repo = g.get_repo(settings.github_repository)
for pr in repo.get_pulls(state="open"):
logging.info(f"Checking PR: #{pr.number}")
pr_labels = list(pr.get_labels())
pr_label_by_name = {label.name: label for label in pr_labels}
reviews = list(pr.get_reviews())
review_by_user: dict[str, PullRequestReview] = {}
for review in reviews:
if review.user.login in review_by_user:
stored_review = review_by_user[review.user.login]
if review.submitted_at >= stored_review.submitted_at:
review_by_user[review.user.login] = review
else:
review_by_user[review.user.login] = review
approved_reviews = [
review for review in review_by_user.values() if review.state == "APPROVED"
]
config = settings.config or default_config
for approved_label, conf in config.items():
logging.debug(f"Processing config: {conf.json()}")
if conf.await_label is None or (conf.await_label in pr_label_by_name):
logging.debug(f"Processable PR: {pr.number}")
if len(approved_reviews) >= conf.number:
logging.info(f"Adding label to PR: {pr.number}")
pr.add_to_labels(approved_label)
if conf.await_label:
logging.info(f"Removing label from PR: {pr.number}")
pr.remove_from_labels(conf.await_label)
logging.info("Finished")
|
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
|
from docarray.typing.tensor.embedding.embedding import Embedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'Embedding']
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Literal
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __init__(
self,
anchors: list[str],
positives: list[str],
negatives: list[str],
main_similarity_function: str | SimilarityFunction | None = None,
margin: float | dict[str, float] | None = None,
name: str = "",
batch_size: int = 16,
show_progress_bar: bool = False,
write_csv: bool = True,
truncate_dim: int | None = None,
similarity_fn_names: list[Literal["cosine", "dot", "euclidean", "manhattan"]] | None = None,
main_distance_function: str | SimilarityFunction | None = "deprecated",
):
super().__init__(
anchors=anchors,
positives=positives,
negatives=negatives,
main_similarity_function=main_similarity_function,
margin=margin,
name=name,
batch_size=batch_size,
show_progress_bar=show_progress_bar,
write_csv=write_csv,
truncate_dim=truncate_dim,
similarity_fn_names=similarity_fn_names,
main_distance_function=main_distance_function,
)
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
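if __name__ == "__main__":
    # Minimal usage sketch with hypothetical data (illustrative only): the evaluator is
    # constructed like the dense TripletEvaluator; actually running it still requires a
    # loaded SparseEncoder, e.g. `metrics = dev_evaluator(model)`.
    dev_evaluator = SparseTripletEvaluator(
        anchors=["What is the capital of France?"],
        positives=["Paris is the capital of France."],
        negatives=["Berlin is the capital of Germany."],
        name="toy_triplets",
    )
    print(type(dev_evaluator).__name__)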
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import TripletEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseTripletEvaluator(TripletEvaluator):
def __call__(
self, model: SparseEncoder, output_path: str = None, epoch: int = -1, steps: int = -1
) -> dict[str, float]:
        return super().__call__(model, output_path, epoch, steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self, model: SparseEncoder, metrics: dict[str, Any], epoch: int = 0, step: int = 0
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch, step)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lstsq
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.linalg import cholesky
from keras.src.ops.linalg import det
from keras.src.ops.linalg import eig
from keras.src.ops.linalg import eigh
from keras.src.ops.linalg import inv
from keras.src.ops.linalg import lu_factor
from keras.src.ops.linalg import norm
from keras.src.ops.linalg import qr
from keras.src.ops.linalg import solve
from keras.src.ops.linalg import solve_triangular
from keras.src.ops.linalg import svd
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
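if __name__ == '__main__':
    # Illustrative sketch only: the SE block rescales the channels of an (N, C, H, W)
    # feature map, so the output shape matches the input shape. Values are hypothetical.
    import torch
    se = SELayer(channels=64, ratio=16)
    x = torch.randn(2, 64, 32, 32)
    assert se(x).shape == x.shape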
|
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
|
_base_ = './solov2_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
stacked_convs=2,
feat_channels=256,
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
mask_feature_head=dict(out_channels=128)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384),
(768, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=(448, 768), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
_base_ = './solov2_r50_fpn_1x_coco.py'
# model settings
model = dict(
mask_head=dict(
stacked_convs=2,
feat_channels=256,
scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)),
mask_feature_head=dict(out_channels=128)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomChoiceResize',
scales=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384),
(768, 352)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(448, 768), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# training schedule for 3x
max_epochs = 36
train_cfg = dict(by_epoch=True, max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0 / 3,
by_epoch=False,
begin=0,
end=500),
dict(
type='MultiStepLR',
begin=0,
end=36,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
|
from pathlib import Path
from typing import List
import pytest
from executor.audioclip_text import AudioCLIPTextEncoder
from jina import Document, DocumentArray, Executor
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
return AudioCLIPTextEncoder(
model_path=str(Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'),
tokenizer_path=str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
)
def test_config():
ex = Executor.load_config(
str(Path(__file__).parents[2] / 'config.yml'),
override_with={
'model_path': str(
Path(__file__).parents[2] / '.cache/AudioCLIP-Full-Training.pt'
),
'tokenizer_path': str(
Path(__file__).parents[2] / '.cache/bpe_simple_vocab_16e6.txt.gz'
),
},
)
assert ex.default_batch_size == 32
def test_no_document(basic_encoder: AudioCLIPTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: AudioCLIPTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: AudioCLIPTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from pathlib import Path
from typing import List
import pytest
from jina import Document, DocumentArray, Executor
from ...audioclip_text import AudioCLIPTextEncoder
_EMBEDDING_DIM = 1024
@pytest.fixture(scope='module')
def basic_encoder() -> AudioCLIPTextEncoder:
return AudioCLIPTextEncoder()
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.default_batch_size == 32
def test_no_document(basic_encoder: AudioCLIPTextEncoder):
basic_encoder.encode(None, {})
def test_empty_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([])
basic_encoder.encode(docs, {})
assert len(docs) == 0
def test_no_text_documents(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray([Document()])
basic_encoder.encode(docs, {})
assert len(docs) == 1
assert docs[0].embedding is None
def test_encoding_cpu():
enc = AudioCLIPTextEncoder(device='cpu')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.gpu
def test_encoding_gpu():
enc = AudioCLIPTextEncoder(device='cuda')
input_data = DocumentArray([Document(text='hello world')])
enc.encode(docs=input_data, parameters={})
assert input_data[0].embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.parametrize(
'traversal_paths, counts',
[
(['r'], [['r', 1], ['c', 0], ['cc', 0]]),
(['c'], [['r', 0], ['c', 3], ['cc', 0]]),
(['cc'], [['r', 0], ['c', 0], ['cc', 2]]),
(['cc', 'r'], [['r', 1], ['c', 0], ['cc', 2]]),
],
)
def test_traversal_path(
traversal_paths: List[str], counts: List, basic_encoder: AudioCLIPTextEncoder
):
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
docs[0].chunks = [
Document(id='chunk11', text=text),
Document(id='chunk12', text=text),
Document(id='chunk13', text=text),
]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
basic_encoder.encode(docs=docs, parameters={'traversal_paths': traversal_paths})
for path, count in counts:
embeddings = docs.traverse_flat([path]).get_attributes('embedding')
assert len(list(filter(lambda x: x is not None, embeddings))) == count
@pytest.mark.parametrize('batch_size', [1, 2, 4, 8])
def test_batch_size(basic_encoder: AudioCLIPTextEncoder, batch_size: int):
docs = DocumentArray([Document(text='hello there') for _ in range(32)])
basic_encoder.encode(docs, parameters={'batch_size': batch_size})
for doc in docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
def test_quality_embeddings(basic_encoder: AudioCLIPTextEncoder):
docs = DocumentArray(
[
            Document(id='A', text='a furry animal with a long tail'),
Document(id='B', text='a domesticated mammal with four legs'),
Document(id='C', text='a type of aircraft that uses rotating wings'),
Document(id='D', text='flying vehicle that has fixed wings and engines'),
]
)
basic_encoder.encode(DocumentArray(docs), {})
# assert semantic meaning is captured in the encoding
docs.match(docs)
matches = ['B', 'A', 'D', 'C']
for i, doc in enumerate(docs):
assert doc.matches[1].id == matches[i]
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_hunyuan_video import AutoencoderKLHunyuanVideo
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
from .autoencoder_asym_kl import AsymmetricAutoencoderKL
from .autoencoder_dc import AutoencoderDC
from .autoencoder_kl import AutoencoderKL
from .autoencoder_kl_allegro import AutoencoderKLAllegro
from .autoencoder_kl_cogvideox import AutoencoderKLCogVideoX
from .autoencoder_kl_ltx import AutoencoderKLLTXVideo
from .autoencoder_kl_mochi import AutoencoderKLMochi
from .autoencoder_kl_temporal_decoder import AutoencoderKLTemporalDecoder
from .autoencoder_oobleck import AutoencoderOobleck
from .autoencoder_tiny import AutoencoderTiny
from .consistency_decoder_vae import ConsistencyDecoderVAE
from .vq_model import VQModel
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], f"(Score: {score:.4f})")
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
"""
This is a simple application for sentence embeddings: semantic search
We have a corpus with various sentences. Then, for a given query sentence,
we want to find the most similar sentence in this corpus.
This script outputs for various queries the top 5 most similar sentences in the corpus.
"""
import torch
from sentence_transformers import SentenceTransformer
embedder = SentenceTransformer("all-MiniLM-L6-v2")
# Corpus with example sentences
corpus = [
"A man is eating food.",
"A man is eating a piece of bread.",
"The girl is carrying a baby.",
"A man is riding a horse.",
"A woman is playing violin.",
"Two men pushed carts through the woods.",
"A man is riding a white horse on an enclosed ground.",
"A monkey is playing drums.",
"A cheetah is running behind its prey.",
]
# Use "convert_to_tensor=True" to keep the tensors on GPU (if available)
corpus_embeddings = embedder.encode(corpus, convert_to_tensor=True)
# Query sentences:
queries = [
"A man is eating pasta.",
"Someone in a gorilla costume is playing a set of drums.",
"A cheetah chases prey on across a field.",
]
# Find the closest 5 sentences of the corpus for each query sentence based on cosine similarity
top_k = min(5, len(corpus))
for query in queries:
query_embedding = embedder.encode(query, convert_to_tensor=True)
# We use cosine-similarity and torch.topk to find the highest 5 scores
similarity_scores = embedder.similarity(query_embedding, corpus_embeddings)[0]
scores, indices = torch.topk(similarity_scores, k=top_k)
print("\nQuery:", query)
print("Top 5 most similar sentences in corpus:")
for score, idx in zip(scores, indices):
print(corpus[idx], "(Score: {:.4f})".format(score))
"""
# Alternatively, we can also use util.semantic_search to perform cosine similarity + topk
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=5)
hits = hits[0] #Get the hits for the first query
for hit in hits:
print(corpus[hit['corpus_id']], "(Score: {:.4f})".format(hit['score']))
"""
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# learning policy
max_epochs = 24
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './fovea_r50_fpn_4x4_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
bbox_head=dict(
with_deform=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
# CoSENTLoss must be imported before AnglELoss
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=60)
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url, timeout=60)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
    parsed_version = release_info["tag_name"].replace("v", "") if release_info else None
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import requests
# Configuration
LIBRARY_NAME = "diffusers"
GITHUB_REPO = "huggingface/diffusers"
SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL")
def check_pypi_for_latest_release(library_name):
"""Check PyPI for the latest release of the library."""
response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=60)
if response.status_code == 200:
data = response.json()
return data["info"]["version"]
else:
print("Failed to fetch library details from PyPI.")
return None
def get_github_release_info(github_repo):
"""Fetch the latest release info from GitHub."""
url = f"https://api.github.com/repos/{github_repo}/releases/latest"
response = requests.get(url, timeout=60)
if response.status_code == 200:
data = response.json()
return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]}
else:
print("Failed to fetch release info from GitHub.")
return None
def notify_slack(webhook_url, library_name, version, release_info):
"""Send a notification to a Slack channel."""
message = (
f"🚀 New release for {library_name} available: version **{version}** 🎉\n"
f"📜 Release Notes: {release_info['url']}\n"
f"⏱️ Release time: {release_info['release_time']}"
)
payload = {"text": message}
response = requests.post(webhook_url, json=payload)
if response.status_code == 200:
print("Notification sent to Slack successfully.")
else:
print("Failed to send notification to Slack.")
def main():
latest_version = check_pypi_for_latest_release(LIBRARY_NAME)
release_info = get_github_release_info(GITHUB_REPO)
    parsed_version = release_info["tag_name"].replace("v", "") if release_info else None
if latest_version and release_info and latest_version == parsed_version:
notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info)
else:
print(f"{latest_version=}, {release_info=}, {parsed_version=}")
raise ValueError("There were some problems.")
if __name__ == "__main__":
main()
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.tensor = video.url.load_key_frames()
model = MyEmbeddingModel()
video.embedding = model(video.tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load_key_frames()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
from typing import Optional, TypeVar
from docarray.base_document import BaseDocument
from docarray.documents import Audio
from docarray.typing import AnyEmbedding, AnyTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
T = TypeVar('T', bound='Video')
class Video(BaseDocument):
"""
Document for handling video.
The Video Document can contain a VideoUrl (`Video.url`), an Audio Document
(`Video.audio`), a VideoTensor (`Video.video_tensor`), an AnyTensor representing
the indices of the video's key frames (`Video.key_frame_indices`) and an
AnyEmbedding (`Video.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Video
# use it directly
vid = Video(
url='https://github.com/docarray/docarray/tree/feat-add-video-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.audio.tensor, vid.video_tensor, vid.key_frame_indices = vid.url.load()
model = MyEmbeddingModel()
vid.embedding = model(vid.video_tensor)
You can extend this Document:
.. code-block:: python
from typing import Optional
from docarray.documents import Text, Video
# extend it
class MyVideo(Video):
name: Optional[Text]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
)
video.video_tensor = video.url.load_key_frames()
model = MyEmbeddingModel()
video.embedding = model(video.video_tensor)
video.name = Text(text='my first video')
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Text, Video
# compose it
class MultiModalDoc(BaseDocument):
video: Video
text: Text
mmdoc = MultiModalDoc(
video=Video(
url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.video.video_tensor = mmdoc.video.url.load_key_frames()
"""
url: Optional[VideoUrl]
audio: Optional[Audio] = Audio()
video_tensor: Optional[VideoTensor]
key_frame_indices: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/models/mask-rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs'),
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field
from pydantic import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.document.io.json import orjson_dumps
from docarray.document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Accessing the nested Python class defined in the schema. Could be useful for
        reconstruction of a Document in serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
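if __name__ == '__main__':
    # Minimal sketch (illustrative only): documents are defined by subclassing
    # BaseDocument; `id` is auto-populated by the default factory above.
    class MyDoc(BaseDocument):
        title: str

    doc = MyDoc(title='hello')
    assert doc.id is not None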
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.document.io.json import orjson_dumps
from docarray.document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: ID.validate(os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Accessing the nested Python class defined in the schema. Could be useful for
        reconstruction of a Document in serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
|
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import SerializeAsAny, ConfigDict
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.prompts import BasePromptTemplate
class LLMPredictStartEvent(BaseEvent):
"""
LLMPredictStartEvent.
Args:
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMPredictStartEvent"
class LLMPredictEndEvent(BaseEvent):
"""
LLMPredictEndEvent.
The result of an llm.predict() call.
Args:
output (str): Output.
"""
output: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMPredictEndEvent"
class LLMStructuredPredictStartEvent(BaseEvent):
"""
LLMStructuredPredictStartEvent.
Args:
output_cls (Any): Output class to predict.
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
output_cls: Any
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictStartEvent"
class LLMStructuredPredictEndEvent(BaseEvent):
"""
LLMStructuredPredictEndEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictEndEvent"
class LLMStructuredPredictInProgressEvent(BaseEvent):
"""
LLMStructuredPredictInProgressEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictInProgressEvent"
class LLMCompletionStartEvent(BaseEvent):
"""
LLMCompletionStartEvent.
Args:
prompt (str): The prompt to be completed.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
prompt: str
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionStartEvent"
class LLMCompletionInProgressEvent(BaseEvent):
"""
LLMCompletionInProgressEvent.
Args:
prompt (str): The prompt to be completed.
response (CompletionResponse): Completion response.
"""
prompt: str
response: CompletionResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionInProgressEvent"
class LLMCompletionEndEvent(BaseEvent):
"""
LLMCompletionEndEvent.
Args:
prompt (str): The prompt to be completed.
response (CompletionResponse): Completion response.
"""
prompt: str
response: CompletionResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionEndEvent"
class LLMChatStartEvent(BaseEvent):
"""
LLMChatStartEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
messages: List[ChatMessage]
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatStartEvent"
class LLMChatInProgressEvent(BaseEvent):
"""
LLMChatInProgressEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
response (ChatResponse): Chat response currently being streamed.
"""
messages: List[ChatMessage]
response: ChatResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatInProgressEvent"
class LLMChatEndEvent(BaseEvent):
"""
LLMChatEndEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
response (Optional[ChatResponse]): Last chat response.
"""
messages: List[ChatMessage]
response: Optional[ChatResponse]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatEndEvent"
|
from typing import Any, List, Optional
from llama_index.core.bridge.pydantic import SerializeAsAny, ConfigDict
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
CompletionResponse,
)
from llama_index.core.instrumentation.events.base import BaseEvent
from llama_index.core.prompts import BasePromptTemplate
class LLMPredictStartEvent(BaseEvent):
"""LLMPredictStartEvent.
Args:
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMPredictStartEvent"
class LLMPredictEndEvent(BaseEvent):
"""LLMPredictEndEvent.
The result of an llm.predict() call.
Args:
output (str): Output.
"""
output: str
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMPredictEndEvent"
class LLMStructuredPredictStartEvent(BaseEvent):
"""LLMStructuredPredictStartEvent.
Args:
output_cls (Any): Output class to predict.
template (BasePromptTemplate): Prompt template.
template_args (Optional[dict]): Prompt template arguments.
"""
output_cls: Any
template: SerializeAsAny[BasePromptTemplate]
template_args: Optional[dict]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictStartEvent"
class LLMStructuredPredictEndEvent(BaseEvent):
"""LLMStructuredPredictEndEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictEndEvent"
class LLMStructuredPredictInProgressEvent(BaseEvent):
"""LLMStructuredPredictInProgressEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictInProgressEvent"
class LLMCompletionStartEvent(BaseEvent):
"""LLMCompletionStartEvent.
Args:
prompt (str): The prompt to be completed.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
prompt: str
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionStartEvent"
class LLMCompletionInProgressEvent(BaseEvent):
"""LLMCompletionInProgressEvent.
Args:
prompt (str): The prompt to be completed.
response (CompletionResponse): Completion response.
"""
prompt: str
response: CompletionResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionInProgressEvent"
class LLMCompletionEndEvent(BaseEvent):
"""LLMCompletionEndEvent.
Args:
prompt (str): The prompt to be completed.
response (CompletionResponse): Completion response.
"""
prompt: str
response: CompletionResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMCompletionEndEvent"
class LLMChatStartEvent(BaseEvent):
"""LLMChatStartEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
messages: List[ChatMessage]
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatStartEvent"
class LLMChatInProgressEvent(BaseEvent):
"""LLMChatInProgressEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
response (ChatResponse): Chat response currently being streamed.
"""
messages: List[ChatMessage]
response: ChatResponse
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatInProgressEvent"
class LLMChatEndEvent(BaseEvent):
"""LLMChatEndEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
response (Optional[ChatResponse]): Last chat response.
"""
messages: List[ChatMessage]
response: Optional[ChatResponse]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatEndEvent"
|
from typing import Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
from docarray.typing.tensor.video.video_jax_array import VideoJaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor,
)
T = TypeVar("T", bound="VideoTensor")
class VideoTensor(AnyTensor, VideoTensorMixin):
"""
    Represents a video tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import VideoTensor
class MyVideoDoc(BaseDoc):
video: VideoTensor
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyVideoDoc(video=tf.zeros(1000, 2))
type(doc.video) # VideoTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyVideoDoc(video=torch.zeros(1000, 2))
type(doc.video) # VideoTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyVideoDoc(video=np.zeros((1000, 2)))
type(doc.video) # VideoNdArray
'''
---
Returns:
        Union[VideoTorchTensor, VideoTensorFlowTensor, VideoNdArray]: The validated and converted video tensor.
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(VideoTorchTensor, value)
elif isinstance(value, torch.Tensor):
return VideoTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(VideoTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return VideoTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(VideoJaxArray, value)
elif isinstance(value, jnp.ndarray):
return VideoJaxArray._docarray_from_native(value) # noqa
if isinstance(value, VideoNdArray):
return cast(VideoNdArray, value)
if isinstance(value, np.ndarray):
try:
return VideoNdArray._docarray_validate(value)
except Exception as e: # noqa
raise e
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.tensor import AnyTensor
from docarray.typing.tensor.video.video_ndarray import VideoNdArray
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
from docarray.utils._internal.misc import (
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
from docarray.typing.tensor.video.video_jax_array import VideoJaxArray
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
from docarray.typing.tensor.video.video_torch_tensor import VideoTorchTensor
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
from docarray.typing.tensor.video.video_tensorflow_tensor import (
VideoTensorFlowTensor,
)
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="VideoTensor")
class VideoTensor(AnyTensor, VideoTensorMixin):
"""
    Represents a video tensor object that can be used with TensorFlow, PyTorch, and NumPy.
---
'''python
from docarray import BaseDoc
from docarray.typing import VideoTensor
class MyVideoDoc(BaseDoc):
video: VideoTensor
# Example usage with TensorFlow:
import tensorflow as tf
    doc = MyVideoDoc(video=tf.zeros((1000, 2)))
type(doc.video) # VideoTensorFlowTensor
# Example usage with PyTorch:
import torch
doc = MyVideoDoc(video=torch.zeros(1000, 2))
type(doc.video) # VideoTorchTensor
# Example usage with NumPy:
import numpy as np
doc = MyVideoDoc(video=np.zeros((1000, 2)))
type(doc.video) # VideoNdArray
'''
---
Returns:
        Union[VideoTorchTensor, VideoTensorFlowTensor, VideoNdArray]: The validated and converted video tensor.
Raises:
TypeError: If the input value is not a compatible type (torch.Tensor, tensorflow.Tensor, numpy.ndarray).
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(VideoTorchTensor, value)
elif isinstance(value, torch.Tensor):
return VideoTorchTensor._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(VideoTensorFlowTensor, value)
elif isinstance(value, tf.Tensor):
return VideoTensorFlowTensor._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(VideoJaxArray, value)
elif isinstance(value, jnp.ndarray):
return VideoJaxArray._docarray_from_native(value) # noqa
if isinstance(value, VideoNdArray):
return cast(VideoNdArray, value)
if isinstance(value, np.ndarray):
try:
return VideoNdArray.validate(value, field, config)
except Exception as e: # noqa
raise e
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("acronym_identification", ["default"]),
("squad", ["plain_text"]),
("hf-internal-testing/dataset_with_script", ["default"]),
("dalle-mini/wit", ["default"]),
("hf-internal-testing/librispeech_asr_dummy", ["clean", "other"]),
("hf-internal-testing/audiofolder_no_configs_in_metadata", ["default"]),
("hf-internal-testing/audiofolder_single_config_in_metadata", ["custom"]),
("hf-internal-testing/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
inspect_dataset(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
inspect_metric(path, tmp_path)
script_name = path + ".py"
assert script_name in os.listdir(tmp_path)
assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
info = get_dataset_config_info(path, config_name=config_name)
assert info.config_name == config_name
assert list(info.splits.keys()) == expected_splits
def test_get_dataset_config_info_private(hf_token, hf_private_dataset_repo_txt_data):
info = get_dataset_config_info(hf_private_dataset_repo_txt_data, config_name="default", token=hf_token)
assert list(info.splits.keys()) == ["train"]
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected",
[
("squad", ["plain_text"]),
("acronym_identification", ["default"]),
("lhoestq/squad", ["plain_text"]),
("lhoestq/test", ["default"]),
("lhoestq/demo1", ["default"]),
("dalle-mini/wit", ["default"]),
("datasets-maintainers/audiofolder_no_configs_in_metadata", ["default"]),
("datasets-maintainers/audiofolder_single_config_in_metadata", ["custom"]),
("datasets-maintainers/audiofolder_two_configs_in_metadata", ["v1", "v2"]),
],
)
def test_get_dataset_config_names(path, expected):
config_names = get_dataset_config_names(path)
assert config_names == expected
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config",
[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["default"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
infos = get_dataset_infos(path)
assert list(infos.keys()) == expected_configs
expected_config = expected_configs[0]
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits",
[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "default", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
infos = get_dataset_infos(path)
assert expected_config in infos
info = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception",
[
("paws", None, ValueError),
],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
with pytest.raises(expected_exception):
get_dataset_split_names(path, config_name=config_name)
|
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from ...custom_image_torch_encoder import CustomImageTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def encoder(tmpdir):
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
return CustomImageTorchEncoder(model_definition_file=os.path.join(cur_dir, '../model/external_model.py'),
model_state_dict_path=model_state_dict_path, layer_name='conv1',
model_class_name='ExternalModel')
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)])])
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
|
import pytest
import os
import numpy as np
from jina import Document, DocumentArray
try:
from custom_image_torch_encoder import CustomImageTorchEncoder
except ImportError:
from jinahub.encoder.custom_image_torch_encoder import CustomImageTorchEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
@pytest.fixture
def encoder(tmpdir):
model_state_dict_path = os.path.join(cur_dir, '../model/model_state_dict.pth')
return CustomImageTorchEncoder(model_definition_file=os.path.join(cur_dir, '../model/external_model.py'),
model_state_dict_path=model_state_dict_path, layer_name='conv1',
model_class_name='ExternalModel')
def test_encoder(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(blob=test_img), Document(blob=test_img)])
encoder.encode(docs, {})
assert len(docs) == 2
for doc in docs:
assert doc.embedding.shape == (output_dim,)
def test_encoder_traversal_paths(encoder):
output_dim = 10
input_dim = 224
test_img = np.random.rand(3, input_dim, input_dim)
docs = DocumentArray([Document(chunks=[Document(blob=test_img), Document(blob=test_img)]),
Document(chunks=[Document(blob=test_img), Document(blob=test_img)])])
encoder.encode(docs, {'traversal_paths': ['c']})
assert len(docs) == 2
assert len(docs.traverse_flat(['c'])) == 4
for chunk in docs.traverse_flat(['c']):
assert chunk.embedding.shape == (output_dim,)
|
_base_ = './htc_r50_fpn_1x_coco.py'
# learning policy
max_epochs = 20
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 19],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
|
_base_ = './htc_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
self.dataloader = runner.build_dataloader(dataloader)
else:
self.dataloader = dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import Dict, Union
from torch.utils.data import DataLoader
class BaseLoop(metaclass=ABCMeta):
"""Base loop class.
All subclasses inherited from ``BaseLoop`` should overwrite the
:meth:`run` method.
Args:
runner (Runner): A reference of runner.
dataloader (Dataloader or dict): An iterator to generate one batch of
dataset each iteration.
"""
def __init__(self, runner, dataloader: Union[DataLoader, Dict]) -> None:
self._runner = runner
if isinstance(dataloader, dict):
self.dataloader = runner.build_dataloader(dataloader)
else:
self.dataloader = dataloader
# TODO, used by `end_of_epoch` of `Hook`
self._runner.data_loader = self.dataloader
@property
def runner(self):
return self._runner
@abstractmethod
def run(self) -> None:
"""Execute loop."""
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
|
from backend.executor.utils import merge_execution_input, parse_execution_output
def test_parse_execution_output():
# Test case for list extraction
output = ("result", [10, 20, 30])
assert parse_execution_output(output, "result_$_1") == 20
assert parse_execution_output(output, "result_$_3") is None
# Test case for dictionary extraction
output = ("config", {"key1": "value1", "key2": "value2"})
assert parse_execution_output(output, "config_#_key1") == "value1"
assert parse_execution_output(output, "config_#_key3") is None
# Test case for object extraction
class Sample:
attr1 = "value1"
attr2 = "value2"
output = ("object", Sample())
assert parse_execution_output(output, "object_@_attr1") == "value1"
assert parse_execution_output(output, "object_@_attr3") is None
# Test case for direct match
output = ("direct", "match")
assert parse_execution_output(output, "direct") == "match"
assert parse_execution_output(output, "nomatch") is None
def test_merge_execution_input():
# Test case for merging list inputs
data = {"list_$_0": "a", "list_$_1": "b", "list_$_3": "d"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a", "b", "", "d"]
# Test case for merging dictionary inputs
data = {"dict_#_key1": "value1", "dict_#_key2": "value2"}
merged_data = merge_execution_input(data)
assert merged_data["dict"] == {"key1": "value1", "key2": "value2"}
# Test case for merging object inputs
data = {"object_@_attr1": "value1", "object_@_attr2": "value2"}
merged_data = merge_execution_input(data)
assert hasattr(merged_data["object"], "attr1")
assert hasattr(merged_data["object"], "attr2")
assert merged_data["object"].attr1 == "value1"
assert merged_data["object"].attr2 == "value2"
# Test case for mixed inputs
data = {"list_$_0": "a", "dict_#_key1": "value1", "object_@_attr1": "value1"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a"]
assert merged_data["dict"] == {"key1": "value1"}
assert hasattr(merged_data["object"], "attr1")
assert merged_data["object"].attr1 == "value1"
|
from backend.data.execution import merge_execution_input, parse_execution_output
def test_parse_execution_output():
# Test case for list extraction
output = ("result", [10, 20, 30])
assert parse_execution_output(output, "result_$_1") == 20
assert parse_execution_output(output, "result_$_3") is None
# Test case for dictionary extraction
output = ("config", {"key1": "value1", "key2": "value2"})
assert parse_execution_output(output, "config_#_key1") == "value1"
assert parse_execution_output(output, "config_#_key3") is None
# Test case for object extraction
class Sample:
attr1 = "value1"
attr2 = "value2"
output = ("object", Sample())
assert parse_execution_output(output, "object_@_attr1") == "value1"
assert parse_execution_output(output, "object_@_attr3") is None
# Test case for direct match
output = ("direct", "match")
assert parse_execution_output(output, "direct") == "match"
assert parse_execution_output(output, "nomatch") is None
def test_merge_execution_input():
# Test case for merging list inputs
data = {"list_$_0": "a", "list_$_1": "b", "list_$_3": "d"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a", "b", "", "d"]
# Test case for merging dictionary inputs
data = {"dict_#_key1": "value1", "dict_#_key2": "value2"}
merged_data = merge_execution_input(data)
assert merged_data["dict"] == {"key1": "value1", "key2": "value2"}
# Test case for merging object inputs
data = {"object_@_attr1": "value1", "object_@_attr2": "value2"}
merged_data = merge_execution_input(data)
assert hasattr(merged_data["object"], "attr1")
assert hasattr(merged_data["object"], "attr2")
assert merged_data["object"].attr1 == "value1"
assert merged_data["object"].attr2 == "value2"
# Test case for mixed inputs
data = {"list_$_0": "a", "dict_#_key1": "value1", "object_@_attr1": "value1"}
merged_data = merge_execution_input(data)
assert merged_data["list"] == ["a"]
assert merged_data["dict"] == {"key1": "value1"}
assert hasattr(merged_data["object"], "attr1")
assert merged_data["object"].attr1 == "value1"
|
from __future__ import annotations
import csv
import logging
import os
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: list[list[str]],
labels: list[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: list[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
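# Hedged usage sketch (an assumption, not part of this module): evaluating a CrossEncoder on a
# couple of labelled pairs. The checkpoint name is only a placeholder; any binary-scoring
# CrossEncoder would do. Kept as comments so the module has no import-time side effects.
#     from sentence_transformers import CrossEncoder
#     model = CrossEncoder("cross-encoder/stsb-distilroberta-base")
#     evaluator = CEBinaryClassificationEvaluator(
#         sentence_pairs=[["A man eats food.", "A man is eating."], ["A man eats food.", "A plane lands."]],
#         labels=[1, 0],
#         name="toy-dev",
#     )
#     average_precision = evaluator(model)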
|
import csv
import logging
import os
from typing import List
import numpy as np
from sklearn.metrics import average_precision_score
from sentence_transformers import InputExample
from sentence_transformers.evaluation import BinaryClassificationEvaluator
logger = logging.getLogger(__name__)
class CEBinaryClassificationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and binary labels (0 and 1),
    it computes the average precision and the best possible F1 score.
"""
def __init__(
self,
sentence_pairs: List[List[str]],
labels: List[int],
name: str = "",
show_progress_bar: bool = False,
write_csv: bool = True,
):
assert len(sentence_pairs) == len(labels)
for label in labels:
assert label == 0 or label == 1
self.sentence_pairs = sentence_pairs
self.labels = np.asarray(labels)
self.name = name
if show_progress_bar is None:
show_progress_bar = (
logger.getEffectiveLevel() == logging.INFO or logger.getEffectiveLevel() == logging.DEBUG
)
self.show_progress_bar = show_progress_bar
self.csv_file = "CEBinaryClassificationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = [
"epoch",
"steps",
"Accuracy",
"Accuracy_Threshold",
"F1",
"F1_Threshold",
"Precision",
"Recall",
"Average_Precision",
]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
labels = []
for example in examples:
sentence_pairs.append(example.texts)
labels.append(example.label)
return cls(sentence_pairs, labels, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CEBinaryClassificationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(
self.sentence_pairs, convert_to_numpy=True, show_progress_bar=self.show_progress_bar
)
acc, acc_threshold = BinaryClassificationEvaluator.find_best_acc_and_threshold(pred_scores, self.labels, True)
f1, precision, recall, f1_threshold = BinaryClassificationEvaluator.find_best_f1_and_threshold(
pred_scores, self.labels, True
)
ap = average_precision_score(self.labels, pred_scores)
logger.info("Accuracy: {:.2f}\t(Threshold: {:.4f})".format(acc * 100, acc_threshold))
logger.info("F1: {:.2f}\t(Threshold: {:.4f})".format(f1 * 100, f1_threshold))
logger.info("Precision: {:.2f}".format(precision * 100))
logger.info("Recall: {:.2f}".format(recall * 100))
logger.info("Average Precision: {:.2f}\n".format(ap * 100))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, acc, acc_threshold, f1, f1_threshold, precision, recall, ap])
return ap
|
from __future__ import annotations
import random
import pytest
import torch
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
from sentence_transformers.util import is_datasets_available
if is_datasets_available():
from datasets import Dataset
else:
pytest.skip(
reason='Sentence Transformers was not installed with the `["train"]` extra.',
allow_module_level=True,
)
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
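# Worked numbers behind the assertions above (derived from the fixture, not extra behaviour):
# each copy of the duplicates dataset has 10 "anchor_1" rows and 8 "anchor_2" rows. With
# batch_size=2 and no duplicate values allowed inside a batch, each sampler can form at most
# 8 full mixed batches plus one leftover single-element batch, so the two samplers yield
# 16 full batches (32 items) with drop_last=True, or 18 batches (34 items) otherwise.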
|
from __future__ import annotations
import random
import pytest
import torch
from datasets import Dataset
from torch.utils.data import ConcatDataset
from sentence_transformers.sampler import NoDuplicatesBatchSampler, ProportionalBatchSampler
@pytest.fixture
def dummy_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"data": [0, 47, 3, 30, 3, ... 2],
"label": [0, 1, 0, 1, ..., 0, 1],
}
"""
# Create a list of two 0's, two 1's, two 2's, ... two 49's. Then shuffle.
values = [j for i in range(50) for j in (i, i)]
random.shuffle(values)
data = {"data": values, "label": [i % 2 for i in range(100)]}
return Dataset.from_dict(data)
@pytest.fixture
def dummy_duplicates_dataset() -> Dataset:
"""
Dummy dataset for testing purposes. The dataset looks as follows:
{
"anchor": ["anchor_1", "anchor_1", "anchor_1", ... "anchor_2", "anchor_2"],
"positive": ["positive_1", "positive_1", "positive_1", ... "positive_2", "positive_2"],
}
"""
values = [{"anchor": "anchor_1", "positive": "positive_1"}] * 10 + [
{"anchor": "anchor_2", "positive": "positive_2"}
] * 8
return Dataset.from_list(values)
def test_group_by_label_batch_sampler_label_a(dummy_dataset: Dataset) -> None:
batch_size = 10
sampler = NoDuplicatesBatchSampler(
dataset=dummy_dataset, batch_size=batch_size, drop_last=True, valid_label_columns=["label"]
)
batches = list(iter(sampler))
# Assert all batch sizes are correct
assert all(len(batch) == batch_size for batch in batches)
# Assert batches contain no duplicate values
for batch in batches:
batch_values = [dummy_dataset[i]["data"] for i in batch]
assert len(batch_values) == len(set(batch_values)), f"Batch {batch} contains duplicate values: {batch_values}"
@pytest.mark.parametrize("drop_last", [True, False])
def test_proportional_no_duplicates(dummy_duplicates_dataset: Dataset, drop_last: bool) -> None:
batch_size = 2
sampler_1 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["anchor"]
)
sampler_2 = NoDuplicatesBatchSampler(
dataset=dummy_duplicates_dataset, batch_size=batch_size, drop_last=drop_last, valid_label_columns=["positive"]
)
concat_dataset = ConcatDataset([dummy_duplicates_dataset, dummy_duplicates_dataset])
batch_sampler = ProportionalBatchSampler(
concat_dataset, [sampler_1, sampler_2], generator=torch.Generator(), seed=12
)
batches = list(iter(batch_sampler))
if drop_last:
# If we drop the last batch (i.e. incomplete batches), we should have 16 batches out of the 18 possible,
# because of the duplicates being skipped by the NoDuplicatesBatchSampler.
# Notably, we should not crash like reported in #2816.
assert len(batches) == 16
# All batches are the same size: 2
assert all(len(batch) == batch_size for batch in batches)
assert len(sum(batches, [])) == 32
else:
# If we don't drop incomplete batches, we should be able to do 18 batches, and get more data.
# Note: we don't get all data, because the NoDuplicatesBatchSampler will estimate the number of batches
# and it would require more (non-complete) batches to get all data.
assert len(batches) == 18
assert len(sum(batches, [])) == 34
|
import uuid
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""
Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "TrafilaturaWebReader"
def load_data(
self,
urls: List[str],
include_comments=True,
output_format="txt",
include_tables=True,
include_images=False,
include_formatting=False,
include_links=False,
show_progress=False,
no_ssl=False,
**kwargs,
) -> List[Document]:
"""
Load data from the urls.
Args:
urls (List[str]): List of URLs to scrape.
include_comments (bool, optional): Include comments in the output. Defaults to True.
output_format (str, optional): Output format. Defaults to 'txt'.
include_tables (bool, optional): Include tables in the output. Defaults to True.
include_images (bool, optional): Include images in the output. Defaults to False.
include_formatting (bool, optional): Include formatting in the output. Defaults to False.
include_links (bool, optional): Include links in the output. Defaults to False.
            show_progress (bool, optional): Show progress bar. Defaults to False.
no_ssl (bool, optional): Bypass SSL verification. Defaults to False.
kwargs: Additional keyword arguments for the `trafilatura.extract` function.
Returns:
List[Document]: List of documents.
"""
import trafilatura
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if show_progress:
from tqdm import tqdm
iterator = tqdm(urls, desc="Downloading pages")
else:
iterator = urls
for url in iterator:
downloaded = trafilatura.fetch_url(url, no_ssl=no_ssl)
response = trafilatura.extract(
downloaded,
include_comments=include_comments,
output_format=output_format,
include_tables=include_tables,
include_images=include_images,
include_formatting=include_formatting,
include_links=include_links,
**kwargs,
)
documents.append(
Document(text=response, id_=str(uuid.uuid4()), metadata={"url": url})
)
return documents
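# Hedged usage sketch (an assumption, not part of this module): the URL is a placeholder and
# fetching it needs network access plus the `trafilatura` package.
#     reader = TrafilaturaWebReader()
#     documents = reader.load_data(["https://example.com"], show_progress=True)
#     print(documents[0].metadata["url"], documents[0].text[:100])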
|
from typing import List
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
class TrafilaturaWebReader(BasePydanticReader):
"""
Trafilatura web page reader.
Reads pages from the web.
Requires the `trafilatura` package.
"""
is_remote: bool = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "TrafilaturaWebReader"
def load_data(
self,
urls: List[str],
include_comments=True,
output_format="txt",
include_tables=True,
include_images=False,
include_formatting=False,
include_links=False,
show_progress=False,
no_ssl=False,
**kwargs,
) -> List[Document]:
"""
Load data from the urls.
Args:
urls (List[str]): List of URLs to scrape.
include_comments (bool, optional): Include comments in the output. Defaults to True.
output_format (str, optional): Output format. Defaults to 'txt'.
include_tables (bool, optional): Include tables in the output. Defaults to True.
include_images (bool, optional): Include images in the output. Defaults to False.
include_formatting (bool, optional): Include formatting in the output. Defaults to False.
include_links (bool, optional): Include links in the output. Defaults to False.
            show_progress (bool, optional): Show progress bar. Defaults to False.
no_ssl (bool, optional): Bypass SSL verification. Defaults to False.
kwargs: Additional keyword arguments for the `trafilatura.extract` function.
Returns:
List[Document]: List of documents.
"""
import trafilatura
if not isinstance(urls, list):
raise ValueError("urls must be a list of strings.")
documents = []
if show_progress:
from tqdm import tqdm
iterator = tqdm(urls, desc="Downloading pages")
else:
iterator = urls
for url in iterator:
downloaded = trafilatura.fetch_url(url, no_ssl=no_ssl)
response = trafilatura.extract(
downloaded,
include_comments=include_comments,
output_format=output_format,
include_tables=include_tables,
include_images=include_images,
include_formatting=include_formatting,
include_links=include_links,
**kwargs,
)
documents.append(Document(text=response, id_=url))
return documents
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (32, 32, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (32, 32, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import os
from pathlib import Path
import numpy as np
import pytest
import torch
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
from mmdet.utils import register_all_modules
# TODO: Waiting to fix multiple call error bug
register_all_modules()
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_init_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(
config_file, device=device, cfg_options=cfg_options)
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device=device)
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
@pytest.mark.parametrize('config,devices',
[('configs/retinanet/retinanet_r18_fpn_1x_coco.py',
('cpu', 'cuda'))])
def test_inference_detector(config, devices):
assert all([device in ['cpu', 'cuda'] for device in devices])
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(project_dir, config)
# test init_detector with config_file: str and cfg_options
rng = np.random.RandomState(0)
img1 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
img2 = rng.randint(0, 255, (100, 100, 3), dtype=np.uint8)
for device in devices:
if device == 'cuda' and not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
model = init_detector(config_file, device=device)
result = inference_detector(model, img1)
assert isinstance(result, DetDataSample)
result = inference_detector(model, [img1, img2])
assert isinstance(result, list) and len(result) == 2
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from docarray import Document
from docarray.helper import typename
if TYPE_CHECKING:
from docarray.typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from docarray import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from docarray import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from docarray import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)):
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
                raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1; however, received ndim={index.ndim}'
                )
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
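# Hedged illustration (an assumption, mirroring the branches above) of the index types this
# mixin accepts on a DocumentArray `da`:
#     da[0]               # int offset                     -> Document
#     da['doc-id']        # string id                      -> Document
#     da['@c']            # '@' traversal path over chunks -> flattened traversal result
#     da[1:3]             # slice                          -> DocumentArray
#     da[[0, 2], 'text']  # (offsets, attribute)           -> list of attribute values
#     da[[True, False]]   # boolean mask                   -> DocumentArray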
|
import itertools
from typing import (
TYPE_CHECKING,
Union,
Sequence,
overload,
Any,
List,
)
import numpy as np
from ... import Document
from ...helper import typename
if TYPE_CHECKING:
from ...typing import (
DocumentArrayIndexType,
DocumentArraySingletonIndexType,
DocumentArrayMultipleIndexType,
DocumentArrayMultipleAttributeType,
DocumentArraySingleAttributeType,
)
from ... import DocumentArray
class GetItemMixin:
"""Provide helper functions to enable advance indexing in `__getitem__`"""
@overload
def __getitem__(self, index: 'DocumentArraySingletonIndexType') -> 'Document':
...
@overload
def __getitem__(self, index: 'DocumentArrayMultipleIndexType') -> 'DocumentArray':
...
@overload
def __getitem__(self, index: 'DocumentArraySingleAttributeType') -> List[Any]:
...
@overload
def __getitem__(
self, index: 'DocumentArrayMultipleAttributeType'
) -> List[List[Any]]:
...
def __getitem__(
self, index: 'DocumentArrayIndexType'
) -> Union['Document', 'DocumentArray']:
if isinstance(index, (int, np.generic)) and not isinstance(index, bool):
return self._get_doc_by_offset(int(index))
elif isinstance(index, str):
if index.startswith('@'):
return self.traverse_flat(index[1:])
else:
return self._get_doc_by_id(index)
elif isinstance(index, slice):
from ... import DocumentArray
return DocumentArray(self._get_docs_by_slice(index))
elif index is Ellipsis:
return self.flatten()
elif isinstance(index, Sequence):
from ... import DocumentArray
if (
isinstance(index, tuple)
and len(index) == 2
and (
isinstance(index[0], (slice, Sequence, str, int))
or index[0] is Ellipsis
)
and isinstance(index[1], (str, Sequence))
):
# TODO: add support for cases such as da[1, ['text', 'id']]?
if isinstance(index[0], (str, int)) and isinstance(index[1], str):
# ambiguity only comes from the second string
if index[1] in self:
return DocumentArray([self[index[0]], self[index[1]]])
else:
_docs = self[index[0]]
if not _docs:
return []
if isinstance(_docs, Document):
return getattr(_docs, index[1])
return _docs._get_attributes(index[1])
elif isinstance(index[0], (slice, Sequence)):
_docs = self[index[0]]
_attrs = index[1]
if isinstance(_attrs, str):
_attrs = (index[1],)
return _docs._get_attributes(*_attrs)
elif isinstance(index[0], bool):
return DocumentArray(itertools.compress(self, index))
elif isinstance(index[0], int):
return DocumentArray(self._get_docs_by_offsets(index))
elif isinstance(index[0], str):
return DocumentArray(self._get_docs_by_ids(index))
elif isinstance(index, np.ndarray):
index = index.squeeze()
if index.ndim == 1:
return self[index.tolist()]
else:
                raise IndexError(
                    f'When using np.ndarray as index, its `ndim` must be 1; however, received ndim={index.ndim}'
                )
raise IndexError(f'Unsupported index type {typename(index)}: {index}')
|
from typing import Optional
import agentql
import httpx
from llama_index.tools.agentql.const import EXTRACT_DATA_ENDPOINT, REQUEST_ORIGIN
from llama_index.tools.agentql.messages import (
QUERY_PROMPT_REQUIRED_ERROR_MESSAGE,
QUERY_PROMPT_EXCLUSIVE_ERROR_MESSAGE,
UNAUTHORIZED_ERROR_MESSAGE,
)
try:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
except ImportError as e:
raise ImportError(
"Unable to import playwright. Please make sure playwright module is properly installed."
) from e
async def _aget_current_agentql_page(browser: AsyncBrowser) -> AsyncPage:
"""
Get the current page of the async browser.
Args:
browser: The browser to get the current page from.
Returns:
Page: The current page.
"""
context = browser.contexts[0] if browser.contexts else await browser.new_context()
page = context.pages[-1] if context.pages else await context.new_page()
return await agentql.wrap_async(page)
def _handle_http_error(e: httpx.HTTPStatusError) -> None:
response = e.response
if response.status_code == httpx.codes.UNAUTHORIZED:
raise ValueError(UNAUTHORIZED_ERROR_MESSAGE) from e
msg = response.text
try:
error_json = response.json()
msg = (
error_json["error_info"] if "error_info" in error_json else str(error_json)
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
async def _aload_data(
url: str,
api_key: str,
metadata: dict,
params: dict,
timeout: int,
query: Optional[str] = None,
prompt: Optional[str] = None,
) -> dict:
if not query and not prompt:
raise ValueError(QUERY_PROMPT_REQUIRED_ERROR_MESSAGE)
if query and prompt:
raise ValueError(QUERY_PROMPT_EXCLUSIVE_ERROR_MESSAGE)
payload = {
"url": url,
"query": query,
"prompt": prompt,
"params": params,
"metadata": metadata,
}
headers = {
"X-API-Key": f"{api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
async with httpx.AsyncClient() as client:
try:
response = await client.post(
EXTRACT_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=timeout,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
_handle_http_error(e)
else:
return response.json()
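# Hedged usage sketch (an assumption, not part of this module): calling the REST helper above
# directly. The URL and API key are placeholders; exactly one of `query`/`prompt` must be
# supplied, as enforced above.
#     import asyncio
#     data = asyncio.run(
#         _aload_data(
#             url="https://example.com",
#             api_key="YOUR_AGENTQL_API_KEY",
#             metadata={},
#             params={},
#             timeout=300,
#             prompt="Extract the page title and the main heading.",
#         )
#     )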
|
from typing import Optional
import agentql
import httpx
from llama_index.tools.agentql.const import EXTRACT_DATA_ENDPOINT, REQUEST_ORIGIN
from llama_index.tools.agentql.messages import (
QUERY_PROMPT_REQUIRED_ERROR_MESSAGE,
QUERY_PROMPT_EXCLUSIVE_ERROR_MESSAGE,
UNAUTHORIZED_ERROR_MESSAGE,
)
try:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
except ImportError as e:
raise ImportError(
"Unable to import playwright. Please make sure playwright module is properly installed."
) from e
async def _aget_current_agentql_page(browser: AsyncBrowser) -> AsyncPage:
"""
Get the current page of the async browser.
Args:
browser: The browser to get the current page from.
Returns:
Page: The current page.
"""
context = browser.contexts[0] if browser.contexts else await browser.new_context()
page = context.pages[-1] if context.pages else await context.new_page()
return await agentql.wrap_async(page)
def _handle_http_error(e: httpx.HTTPStatusError) -> None:
response = e.response
if response.status_code == httpx.codes.UNAUTHORIZED:
raise ValueError(UNAUTHORIZED_ERROR_MESSAGE) from e
msg = response.text
try:
error_json = response.json()
msg = (
error_json["error_info"] if "error_info" in error_json else str(error_json)
)
except (ValueError, TypeError):
msg = f"HTTP {e}."
raise ValueError(msg) from e
async def _aload_data(
url: str,
api_key: str,
metadata: dict,
params: dict,
timeout: int,
query: Optional[str] = None,
prompt: Optional[str] = None,
) -> dict:
if not query and not prompt:
raise ValueError(QUERY_PROMPT_REQUIRED_ERROR_MESSAGE)
if query and prompt:
raise ValueError(QUERY_PROMPT_EXCLUSIVE_ERROR_MESSAGE)
payload = {
"url": url,
"query": query,
"prompt": prompt,
"params": params,
"metadata": metadata,
}
headers = {
"X-API-Key": f"{api_key}",
"Content-Type": "application/json",
"X-TF-Request-Origin": REQUEST_ORIGIN,
}
async with httpx.AsyncClient() as client:
try:
response = await client.post(
EXTRACT_DATA_ENDPOINT,
headers=headers,
json=payload,
timeout=timeout,
)
response.raise_for_status()
except httpx.HTTPStatusError as e:
_handle_http_error(e)
else:
return response.json()
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@MODELS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
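# Hedged usage sketch (not part of the original module): encode a single toy
# binary mask for a one-class result list; the 2x2 mask is purely illustrative.
if __name__ == "__main__":
    toy_segms = [[np.ones((2, 2), dtype=bool)]]  # one class with one instance mask
    print(encode_mask_results(toy_segms))  # [[{'size': [2, 2], 'counts': ...}]]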
|
import mmcv
import numpy as np
import pycocotools.mask as mask_util
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
|
import pathlib
from typing import Any, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> list[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> dict[str, Any]:
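        # Each line is in LIBSVM format: "<label> <index>:<value> ...".
        # The raw feature values lie in [-1, 1]; add_(1).div_(2) rescales them to [0, 1].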
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
|
import pathlib
from typing import Any, Dict, List, Union
import torch
from torchdata.datapipes.iter import Decompressor, IterDataPipe, LineReader, Mapper
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "usps"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class USPS(Dataset):
"""USPS Dataset
homepage="https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps",
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
_URL = "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass"
_RESOURCES = {
"train": HttpResource(
f"{_URL}/usps.bz2", sha256="3771e9dd6ba685185f89867b6e249233dd74652389f263963b3b741e994b034f"
),
"test": HttpResource(
f"{_URL}/usps.t.bz2", sha256="a9c0164e797d60142a50604917f0baa604f326e9a689698763793fa5d12ffc4e"
),
}
def _resources(self) -> List[OnlineResource]:
return [USPS._RESOURCES[self._split]]
def _prepare_sample(self, line: str) -> Dict[str, Any]:
label, *values = line.strip().split(" ")
values = [float(value.split(":")[1]) for value in values]
pixels = torch.tensor(values).add_(1).div_(2)
return dict(
image=Image(pixels.reshape(16, 16)),
label=Label(int(label) - 1, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = Decompressor(resource_dps[0])
dp = LineReader(dp, decode=True, return_path=False)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 7_291 if self._split == "train" else 2_007
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""
SPLADE Pooling module for creating the sparse embeddings.
This module implements the SPLADE pooling mechanism that:
1. Takes token logits from a masked language model (MLM)
    2. Applies the SPLADE activation log(1 + ReLU(MLM logits)) to obtain sparse token scores
3. Applies a pooling strategy (max or sum) to produce sparse embeddings
The resulting embeddings are highly sparse and capture lexical information,
making them suitable for efficient information retrieval.
Args:
pooling_strategy (str): The pooling strategy to use, either "max" or "sum".
"max" takes the maximum value across all tokens.
"sum" adds the values across all tokens.
"""
SPLADE_POOLING_MODES = ("sum", "max")
    def __init__(self, pooling_strategy: str = "max", word_embedding_dimension: int | None = None) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
            features: Dictionary containing input features with the 'token_embeddings' key (the MLM logits)
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
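# Hedged usage sketch (not part of the original module): run random fake MLM
# logits through SpladePooling to inspect the pooled output shape. The batch
# size, sequence length, and vocabulary size below are arbitrary.
if __name__ == "__main__":
    fake_mlm_logits = torch.randn(2, 8, 30522)  # (batch_size, seq_length, vocab_size)
    pooling = SpladePooling(pooling_strategy="max")
    output = pooling({"token_embeddings": fake_mlm_logits})
    print(output["sentence_embedding"].shape)  # torch.Size([2, 30522])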
|
from __future__ import annotations
import json
import os
from typing import Any
import torch
from torch import nn
class SpladePooling(nn.Module):
"""SPLADE pooling layer that aggregates MLM logits using max or sum pooling.
This pooling layer takes MLM logits (shape: batch_size, seq_length, vocab_size)
and applies SPLADE transformation (ReLU + log) followed by pooling across the
sequence length dimension.
Args:
pooling_strategy: Either 'max' or 'sum' for SPLADE pooling
"""
SPLADE_POOLING_MODES = ("sum", "max")
    def __init__(self, pooling_strategy: str = "max", word_embedding_dimension: int | None = None) -> None:
super().__init__()
self.pooling_strategy = pooling_strategy
if pooling_strategy not in self.SPLADE_POOLING_MODES:
raise ValueError("pooling_strategy must be either 'max' or 'sum'")
self.config_keys = ["pooling_strategy", "word_embedding_dimension"]
self.word_embedding_dimension = word_embedding_dimension # This will be set in the forward method
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
"""Forward pass of the model.
Args:
            features: Dictionary containing input features with the 'token_embeddings' key (the MLM logits)
Returns:
Dictionary containing SPLADE pooled embeddings
"""
# Get the MLM head logits (shape: batch_size, seq_length, vocab_size)
mlm_logits = features["token_embeddings"]
# Apply ReLU and log transformation for SPLADE
splade_scores = torch.log1p(torch.relu(mlm_logits))
# Pool across sequence length dimension
if self.pooling_strategy == "max":
pooled_scores = torch.max(splade_scores, dim=1)[0] # shape: batch_size, vocab_size
else: # sum
pooled_scores = torch.sum(splade_scores, dim=1) # shape: batch_size, vocab_size
# Set the word embedding dimension
if self.word_embedding_dimension is None:
self.word_embedding_dimension = pooled_scores.shape[1]
features["sentence_embedding"] = pooled_scores
return features
def get_config_dict(self) -> dict[str, Any]:
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path) -> SpladePooling:
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return SpladePooling(**config)
def __repr__(self) -> str:
return f"SpladePooling({self.get_config_dict()})"
def get_sentence_embedding_dimension(self) -> int:
"""Get the dimension of the sentence embedding.
Returns:
int: Dimension of the sentence embedding
"""
return self.word_embedding_dimension
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.utils import numerical_utils
NUM_CLASSES = 5
class TestNumericalUtils(testing.TestCase, parameterized.TestCase):
@parameterized.parameters(
[
((1,), (1, NUM_CLASSES)),
((3,), (3, NUM_CLASSES)),
((4, 3), (4, 3, NUM_CLASSES)),
((5, 4, 3), (5, 4, 3, NUM_CLASSES)),
((3, 1), (3, NUM_CLASSES)),
((3, 2, 1), (3, 2, NUM_CLASSES)),
]
)
def test_to_categorical(self, shape, expected_shape):
label = np.random.randint(0, NUM_CLASSES, shape)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
# Check shape
self.assertEqual(one_hot.shape, expected_shape)
# Make sure there is only one 1 in a row
self.assertTrue(np.all(one_hot.sum(axis=-1) == 1))
# Get original labels back from one hots
self.assertTrue(
np.all(np.argmax(one_hot, -1).reshape(label.shape) == label)
)
    def test_to_categorical_without_num_classes(self):
label = [0, 2, 5]
one_hot = numerical_utils.to_categorical(label)
self.assertEqual(one_hot.shape, (3, 5 + 1))
def test_to_categorical_with_backend_tensor(self):
label = backend.convert_to_tensor(np.array([0, 2, 1, 3, 4]))
expected = backend.convert_to_tensor(
np.array(
[
[1, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
)
)
one_hot = numerical_utils.to_categorical(label, NUM_CLASSES)
assert backend.is_tensor(one_hot)
self.assertAllClose(one_hot, expected)
@parameterized.parameters([1, 2, 3])
def test_normalize(self, order):
xb = backend.random.uniform((3, 3), seed=1337)
xnp = backend.convert_to_numpy(xb)
# Expected result
l2 = np.atleast_1d(np.linalg.norm(xnp, order, axis=-1))
l2[l2 == 0] = 1
expected = xnp / np.expand_dims(l2, axis=-1)
# Test NumPy
out = numerical_utils.normalize(xnp, axis=-1, order=order)
self.assertIsInstance(out, np.ndarray)
self.assertAllClose(out, expected)
# Test backend
out = numerical_utils.normalize(xb, axis=-1, order=order)
self.assertTrue(backend.is_tensor(out))
self.assertAllClose(backend.convert_to_numpy(out), expected)
|
# type: ignore
"""Development Scripts for template packages."""
from collections.abc import Sequence
from fastapi import FastAPI
from langserve import add_routes
from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
*,
config_keys: Sequence[str] = (),
playground_type: str = "default",
):
"""Creates a demo server for the current template."""
app = FastAPI()
package_root = get_package_root()
pyproject = package_root / "pyproject.toml"
try:
package = get_langserve_export(pyproject)
mod = __import__(package["module"], fromlist=[package["attr"]])
chain = getattr(mod, package["attr"])
add_routes(
app,
chain,
config_keys=config_keys,
playground_type=playground_type,
)
except KeyError as e:
msg = "Missing fields from pyproject.toml"
raise KeyError(msg) from e
except ImportError as e:
msg = "Could not import module defined in pyproject.toml"
raise ImportError(msg) from e
return app
def create_demo_server_configurable():
return create_demo_server(config_keys=["configurable"])
def create_demo_server_chat():
return create_demo_server(playground_type="chat")
|
# type: ignore
"""
Development Scripts for template packages
"""
from collections.abc import Sequence
from fastapi import FastAPI
from langserve import add_routes
from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
*,
config_keys: Sequence[str] = (),
playground_type: str = "default",
):
"""
Creates a demo server for the current template.
"""
app = FastAPI()
package_root = get_package_root()
pyproject = package_root / "pyproject.toml"
try:
package = get_langserve_export(pyproject)
mod = __import__(package["module"], fromlist=[package["attr"]])
chain = getattr(mod, package["attr"])
add_routes(
app,
chain,
config_keys=config_keys,
playground_type=playground_type,
)
except KeyError as e:
raise KeyError("Missing fields from pyproject.toml") from e
except ImportError as e:
raise ImportError("Could not import module defined in pyproject.toml") from e
return app
def create_demo_server_configurable():
return create_demo_server(config_keys=["configurable"])
def create_demo_server_chat():
return create_demo_server(playground_type="chat")
|
_base_ = './mask-rcnn_hrnetv2p-w18-1x_coco.py'
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py'
model = dict(
backbone=dict(
type='HRNet',
extra=dict(
stage2=dict(num_channels=(40, 80)),
stage3=dict(num_channels=(40, 80, 160)),
stage4=dict(num_channels=(40, 80, 160, 320))),
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://msra/hrnetv2_w40')),
neck=dict(type='HRFPN', in_channels=[40, 80, 160, 320], out_channels=256))
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CrossEncoderClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CrossEncoderClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CrossEncoderClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
"""
This example trains a CrossEncoder for the NLI task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it learns to predict the labels: "contradiction": 0, "entailment": 1, "neutral": 2.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_nli.py
"""
import logging
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CEClassificationEvaluator
from sentence_transformers.cross_encoder.losses.CrossEntropyLoss import CrossEntropyLoss
from sentence_transformers.cross_encoder.trainer import CrossEncoderTrainer
from sentence_transformers.cross_encoder.training_args import CrossEncoderTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
train_batch_size = 64
num_epochs = 1
output_dir = "output/training_ce_allnli-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Define our CrossEncoder model. We use distilroberta-base as the base model and set it up to predict 3 labels
# You can also use other base models, like bert-base-uncased, microsoft/mpnet-base, etc.
model_name = "distilroberta-base"
model = CrossEncoder(model_name, num_labels=3)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 100k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(100_000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
test_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="test")
logging.info(train_dataset)
# 3. Define our training loss:
loss = CrossEntropyLoss(model)
# 4. Before and during training, we use CEClassificationEvaluator to measure the performance on the dev set
dev_cls_evaluator = CEClassificationEvaluator(
sentence_pairs=list(zip(eval_dataset["premise"], eval_dataset["hypothesis"])),
labels=eval_dataset["label"],
name="AllNLI-dev",
)
dev_cls_evaluator(model)
# 5. Define the training arguments
short_model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
run_name = f"reranker-{short_model_name}-nli"
args = CrossEncoderTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_epochs,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=False, # Set to False if you get an error that your GPU can't run on FP16
bf16=True, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=2,
logging_steps=100,
run_name=run_name, # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = CrossEncoderTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_cls_evaluator,
)
trainer.train()
# 7. Evaluate the final model on test dataset
test_cls_evaluator = CEClassificationEvaluator(
list(zip(test_dataset["premise"], test_dataset["hypothesis"])),
test_dataset["label"],
name="AllNLI-test",
)
test_cls_evaluator(model)
# 8. Save the final model
final_output_dir = f"{output_dir}/final"
model.save_pretrained(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(run_name)
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = CrossEncoder({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{run_name}')`."
)
|
import torch
from ..utils import _log_api_usage_once
from ._utils import _loss_inter_union, _upcast_non_float
def distance_box_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Gradient-friendly IoU loss with an additional penalty that is non-zero when the
distance between boxes' centers isn't zero. Indeed, for two exactly overlapping
boxes, the distance IoU is the same as the IoU loss.
This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``, and the two boxes should have the
same dimensions.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[N, 4]): second set of boxes
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor: Loss tensor with the reduction option applied.
Reference:
Zhaohui Zheng et al.: Distance Intersection over Union Loss:
https://arxiv.org/abs/1911.08287
"""
# Original Implementation from https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(distance_box_iou_loss)
boxes1 = _upcast_non_float(boxes1)
boxes2 = _upcast_non_float(boxes2)
loss, _ = _diou_iou_loss(boxes1, boxes2, eps)
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
def _diou_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
eps: float = 1e-7,
) -> tuple[torch.Tensor, torch.Tensor]:
intsct, union = _loss_inter_union(boxes1, boxes2)
iou = intsct / (union + eps)
# smallest enclosing box
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
# The diagonal distance of the smallest enclosing box squared
diagonal_distance_squared = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
# The distance between boxes' centers squared.
centers_distance_squared = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# The distance IoU is the IoU penalized by a normalized
# distance between boxes' centers squared.
loss = 1 - iou + (centers_distance_squared / diagonal_distance_squared)
return loss, iou
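# Hedged usage sketch (not part of the original file): the coordinates below are
# illustrative, and the snippet assumes this module is importable as part of its
# package (the relative imports above prevent running this file as a standalone script).
if __name__ == "__main__":
    demo_boxes1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
    demo_boxes2 = torch.tensor([[2.0, 2.0, 12.0, 12.0]])
    # Identical boxes give a loss of ~0; shifted centers increase the penalty.
    print(distance_box_iou_loss(demo_boxes1, demo_boxes2, reduction="mean"))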
|
from typing import Tuple
import torch
from ..utils import _log_api_usage_once
from ._utils import _loss_inter_union, _upcast_non_float
def distance_box_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
reduction: str = "none",
eps: float = 1e-7,
) -> torch.Tensor:
"""
Gradient-friendly IoU loss with an additional penalty that is non-zero when the
distance between boxes' centers isn't zero. Indeed, for two exactly overlapping
boxes, the distance IoU is the same as the IoU loss.
This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``, and the two boxes should have the
same dimensions.
Args:
boxes1 (Tensor[N, 4]): first set of boxes
boxes2 (Tensor[N, 4]): second set of boxes
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: No reduction will be
applied to the output. ``'mean'``: The output will be averaged.
``'sum'``: The output will be summed. Default: ``'none'``
eps (float, optional): small number to prevent division by zero. Default: 1e-7
Returns:
Tensor: Loss tensor with the reduction option applied.
Reference:
Zhaohui Zheng et al.: Distance Intersection over Union Loss:
https://arxiv.org/abs/1911.08287
"""
# Original Implementation from https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/losses.py
if not torch.jit.is_scripting() and not torch.jit.is_tracing():
_log_api_usage_once(distance_box_iou_loss)
boxes1 = _upcast_non_float(boxes1)
boxes2 = _upcast_non_float(boxes2)
loss, _ = _diou_iou_loss(boxes1, boxes2, eps)
# Check reduction option and return loss accordingly
if reduction == "none":
pass
elif reduction == "mean":
loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
elif reduction == "sum":
loss = loss.sum()
else:
raise ValueError(
f"Invalid Value for arg 'reduction': '{reduction} \n Supported reduction modes: 'none', 'mean', 'sum'"
)
return loss
def _diou_iou_loss(
boxes1: torch.Tensor,
boxes2: torch.Tensor,
eps: float = 1e-7,
) -> Tuple[torch.Tensor, torch.Tensor]:
intsct, union = _loss_inter_union(boxes1, boxes2)
iou = intsct / (union + eps)
# smallest enclosing box
x1, y1, x2, y2 = boxes1.unbind(dim=-1)
x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
xc1 = torch.min(x1, x1g)
yc1 = torch.min(y1, y1g)
xc2 = torch.max(x2, x2g)
yc2 = torch.max(y2, y2g)
# The diagonal distance of the smallest enclosing box squared
diagonal_distance_squared = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps
# centers of boxes
x_p = (x2 + x1) / 2
y_p = (y2 + y1) / 2
x_g = (x1g + x2g) / 2
y_g = (y1g + y2g) / 2
# The distance between boxes' centers squared.
centers_distance_squared = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)
# The distance IoU is the IoU penalized by a normalized
# distance between boxes' centers squared.
loss = 1 - iou + (centers_distance_squared / diagonal_distance_squared)
return loss, iou
|
PODCAST_DOCS = """API documentation:
Endpoint: https://listen-api.listennotes.com/api/v2
GET /search
This API is for searching podcasts or episodes.
Query parameters table:
q | string | Search term, e.g., person, place, topic... You can use double quotes to do verbatim match, e.g., "game of thrones". Otherwise, it's fuzzy search. | required
type | string | What type of contents do you want to search for? Available values: episode, podcast, curated. default: episode | optional
page_size | integer | The maximum number of search results per page. A valid value should be an integer between 1 and 10 (inclusive). default: 3 | optional
language | string | Limit search results to a specific language, e.g., English, Chinese ... If not specified, it'll be any language. It works only when type is episode or podcast. | optional
region | string | Limit search results to a specific region (e.g., us, gb, in...). If not specified, it'll be any region. It works only when type is episode or podcast. | optional
len_min | integer | Minimum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional
len_max | integer | Maximum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional
Response schema (JSON object):
next_offset | integer | optional
total | integer | optional
results | array[object] (Episode / Podcast List Result Object)
Each object in the "results" key has the following schema:
listennotes_url | string | optional
id | integer | optional
title_highlighted | string | optional
Use page_size: 3
""" # noqa: E501
|
# flake8: noqa
PODCAST_DOCS = """API documentation:
Endpoint: https://listen-api.listennotes.com/api/v2
GET /search
This API is for searching podcasts or episodes.
Query parameters table:
q | string | Search term, e.g., person, place, topic... You can use double quotes to do verbatim match, e.g., "game of thrones". Otherwise, it's fuzzy search. | required
type | string | What type of contents do you want to search for? Available values: episode, podcast, curated. default: episode | optional
page_size | integer | The maximum number of search results per page. A valid value should be an integer between 1 and 10 (inclusive). default: 3 | optional
language | string | Limit search results to a specific language, e.g., English, Chinese ... If not specified, it'll be any language. It works only when type is episode or podcast. | optional
region | string | Limit search results to a specific region (e.g., us, gb, in...). If not specified, it'll be any region. It works only when type is episode or podcast. | optional
len_min | integer | Minimum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional
len_max | integer | Maximum audio length in minutes. Applicable only when type parameter is episode or podcast. If type parameter is episode, it's for audio length of an episode. If type parameter is podcast, it's for average audio length of all episodes in a podcast. | optional
Response schema (JSON object):
next_offset | integer | optional
total | integer | optional
results | array[object] (Episode / Podcast List Result Object)
Each object in the "results" key has the following schema:
listennotes_url | string | optional
id | integer | optional
title_highlighted | string | optional
Use page_size: 3
"""
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from typing import TYPE_CHECKING
from langchain_core._import_utils import import_attr
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = (
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
)
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
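# PEP 562 module-level __getattr__: names listed in _dynamic_imports are imported
# lazily on first attribute access and cached in globals(), so subsequent lookups
# bypass the import machinery.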
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
result = import_attr(attr_name, module_name, __spec__.parent)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components and prompt values. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
""" # noqa: E501
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.prompts.base import (
BasePromptTemplate,
aformat_document,
format_document,
)
from langchain_core.prompts.chat import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_core.prompts.few_shot import (
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
)
from langchain_core.prompts.few_shot_with_templates import (
FewShotPromptWithTemplates,
)
from langchain_core.prompts.loading import load_prompt
from langchain_core.prompts.pipeline import PipelinePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.prompts.string import (
StringPromptTemplate,
check_valid_template,
get_template_variables,
jinja2_formatter,
validate_jinja2,
)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"FewShotChatMessagePromptTemplate",
"HumanMessagePromptTemplate",
"MessagesPlaceholder",
"PipelinePromptTemplate",
"PromptTemplate",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"format_document",
"aformat_document",
"check_valid_template",
"get_template_variables",
"jinja2_formatter",
"validate_jinja2",
]
_dynamic_imports = {
"BasePromptTemplate": "base",
"format_document": "base",
"aformat_document": "base",
"AIMessagePromptTemplate": "chat",
"BaseChatPromptTemplate": "chat",
"ChatMessagePromptTemplate": "chat",
"ChatPromptTemplate": "chat",
"HumanMessagePromptTemplate": "chat",
"MessagesPlaceholder": "chat",
"SystemMessagePromptTemplate": "chat",
"FewShotChatMessagePromptTemplate": "few_shot",
"FewShotPromptTemplate": "few_shot",
"FewShotPromptWithTemplates": "few_shot_with_templates",
"load_prompt": "loading",
"PipelinePromptTemplate": "pipeline",
"PromptTemplate": "prompt",
"StringPromptTemplate": "string",
"check_valid_template": "string",
"get_template_variables": "string",
"jinja2_formatter": "string",
"validate_jinja2": "string",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = img_metas * num_branch
proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
else:
proposal_list = proposals
# TODO: Fix trident_img_metas undefined errors
# when proposals is specified
return self.roi_head.simple_test(
x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = img_metas * num_branch
proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
trident_img_metas = [img_metas * num_branch for img_metas in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
|
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
from typing import Dict
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's
quite slow to draw using matplotlib.
"""
def __init__(self, rounds: int) -> None:
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines: Dict[str, plt.Line2D] = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data: str, metric: str) -> str:
return f"{data}-{metric}"
def after_iteration(
self, model: xgb.Booster, epoch: int, evals_log: Dict[str, dict]
) -> bool:
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback() -> None:
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback() -> None:
"""Demo for using the checkpoint callback. Custom logic for handling output is
usually required and users are encouraged to define their own callback for
checkpointing operations. The builtin one can be used as a starting point.
"""
# Only for demo, set a larger value (like 100) in practice as checkpointing is quite
# slow.
rounds = 2
def check(as_pickle: bool) -> None:
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(
tmpdir,
f"model_{i}.{xgb.callback.TrainingCheckPoint.default_format}",
)
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Check point to a temporary directory for demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your need.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
"""
Demo for using and defining callback functions
==============================================
.. versionadded:: 1.3.0
"""
import argparse
import os
import tempfile
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
import xgboost as xgb
class Plotting(xgb.callback.TrainingCallback):
"""Plot evaluation result during training. Only for demonstration purpose as it's quite
slow to draw.
"""
def __init__(self, rounds):
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.rounds = rounds
self.lines = {}
self.fig.show()
self.x = np.linspace(0, self.rounds, self.rounds)
plt.ion()
def _get_key(self, data, metric):
return f"{data}-{metric}"
def after_iteration(self, model, epoch, evals_log):
"""Update the plot."""
if not self.lines:
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
(self.lines[key],) = self.ax.plot(self.x, expanded, label=key)
self.ax.legend()
else:
# https://pythonspot.com/matplotlib-update-plot/
for data, metric in evals_log.items():
for metric_name, log in metric.items():
key = self._get_key(data, metric_name)
expanded = log + [0] * (self.rounds - len(log))
self.lines[key].set_ydata(expanded)
self.fig.canvas.draw()
# False to indicate training should not stop.
return False
def custom_callback():
"""Demo for defining a custom callback function that plots evaluation result during
training."""
X, y = load_breast_cancer(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
D_train = xgb.DMatrix(X_train, y_train)
D_valid = xgb.DMatrix(X_valid, y_valid)
num_boost_round = 100
plotting = Plotting(num_boost_round)
# Pass it to the `callbacks` parameter as a list.
xgb.train(
{
"objective": "binary:logistic",
"eval_metric": ["error", "rmse"],
"tree_method": "hist",
"device": "cuda",
},
D_train,
evals=[(D_train, "Train"), (D_valid, "Valid")],
num_boost_round=num_boost_round,
callbacks=[plotting],
)
def check_point_callback():
# only for demo, set a larger value (like 100) in practice as checkpointing is quite
# slow.
rounds = 2
def check(as_pickle):
for i in range(0, 10, rounds):
if i == 0:
continue
if as_pickle:
path = os.path.join(tmpdir, "model_" + str(i) + ".pkl")
else:
path = os.path.join(tmpdir, "model_" + str(i) + ".json")
assert os.path.exists(path)
X, y = load_breast_cancer(return_X_y=True)
m = xgb.DMatrix(X, y)
# Checkpoint to a temporary directory for the demo
with tempfile.TemporaryDirectory() as tmpdir:
# Use callback class from xgboost.callback
# Feel free to subclass/customize it to suit your needs.
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(False)
# This version of checkpoint saves everything including parameters and
# model. See: doc/tutorials/saving_model.rst
check_point = xgb.callback.TrainingCheckPoint(
directory=tmpdir, interval=rounds, as_pickle=True, name="model"
)
xgb.train(
{"objective": "binary:logistic"},
m,
num_boost_round=10,
verbose_eval=False,
callbacks=[check_point],
)
check(True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--plot", default=1, type=int)
args = parser.parse_args()
check_point_callback()
if args.plot:
custom_callback()
|
from datetime import datetime
import pytest
from prisma.models import CreditTransaction
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = UserCredit(REFILL_VALUE)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
month1 = datetime(2022, 1, 15)
month2 = datetime(2022, 2, 15)
user_credit.time_now = lambda: month2
month2credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: month1
month1credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: month2
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
# Clear all transactions within the month
await CreditTransaction.prisma().update_many(
where={
"userId": DEFAULT_USER_ID,
"createdAt": {
"gte": datetime(2022, 2, 1),
"lt": datetime(2022, 3, 1),
},
},
data={"isActive": False},
)
user_credit.time_now = lambda: datetime(2022, 2, 15)
balance = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from datetime import datetime
import pytest
from prisma.models import UserBlockCredit
from backend.blocks.llm import AITextGeneratorBlock
from backend.data.credit import UserCredit
from backend.data.user import DEFAULT_USER_ID
from backend.integrations.credentials_store import openai_credentials
from backend.util.test import SpinTestServer
REFILL_VALUE = 1000
user_credit = UserCredit(REFILL_VALUE)
@pytest.mark.asyncio(scope="session")
async def test_block_credit_usage(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
spending_amount_1 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{
"model": "gpt-4-turbo",
"credentials": {
"id": openai_credentials.id,
"provider": openai_credentials.provider,
"type": openai_credentials.type,
},
},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_1 > 0
spending_amount_2 = await user_credit.spend_credits(
DEFAULT_USER_ID,
current_credit,
AITextGeneratorBlock().id,
{"model": "gpt-4-turbo", "api_key": "owned_api_key"},
0.0,
0.0,
validate_balance=False,
)
assert spending_amount_2 == 0
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit - spending_amount_1 - spending_amount_2
@pytest.mark.asyncio(scope="session")
async def test_block_credit_top_up(server: SpinTestServer):
current_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
new_credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert new_credit == current_credit + 100
@pytest.mark.asyncio(scope="session")
async def test_block_credit_reset(server: SpinTestServer):
month1 = datetime(2022, 1, 15)
month2 = datetime(2022, 2, 15)
user_credit.time_now = lambda: month2
month2credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
# Month 1 result should only affect month 1
user_credit.time_now = lambda: month1
month1credit = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
await user_credit.top_up_credits(DEFAULT_USER_ID, 100)
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month1credit + 100
# Month 2 balance is unaffected
user_credit.time_now = lambda: month2
assert await user_credit.get_or_refill_credit(DEFAULT_USER_ID) == month2credit
@pytest.mark.asyncio(scope="session")
async def test_credit_refill(server: SpinTestServer):
# Clear all transactions within the month
await UserBlockCredit.prisma().update_many(
where={
"userId": DEFAULT_USER_ID,
"createdAt": {
"gte": datetime(2022, 2, 1),
"lt": datetime(2022, 3, 1),
},
},
data={"isActive": False},
)
user_credit.time_now = lambda: datetime(2022, 2, 15)
balance = await user_credit.get_or_refill_credit(DEFAULT_USER_ID)
assert balance == REFILL_VALUE
|
from __future__ import annotations
import json
import logging
import os
from typing import Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: list[str],
word_weights: dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: list[str], **kwargs) -> dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: list[list[int]], pad_seq_length: int = 0
) -> dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
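# A minimal usage sketch (illustrative only; the vocab and weights below are made-up
# values, and the relative import above means this module is normally used inside its
# package rather than run directly as a script).
if __name__ == "__main__":
    bow = BoW(vocab=["hello", "world", "test"], word_weights={"hello": 2.0})
    features = bow.tokenize(["hello world", "test test"])
    # One row per input text, each of length len(vocab); with
    # cumulative_term_frequency=True, repeated tokens accumulate their weight.
    print(features["sentence_embedding"].shape)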
|
import json
import logging
import os
from typing import Dict, List, Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
def tokenize(self, texts: List[str], **kwargs) -> Dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: List[List[int]], pad_seq_length: int = 0
) -> Dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
from llama_index.core.extractors.metadata_extractors import (
BaseExtractor,
KeywordExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleExtractor,
)
def load_extractor(
data: dict,
) -> BaseExtractor:
if isinstance(data, BaseExtractor):
return data
extractor_name = data.get("class_name")
if extractor_name is None:
raise ValueError("Extractor loading requires a class_name")
if extractor_name == SummaryExtractor.class_name():
return SummaryExtractor.from_dict(data)
elif extractor_name == QuestionsAnsweredExtractor.class_name():
return QuestionsAnsweredExtractor.from_dict(data)
elif extractor_name == TitleExtractor.class_name():
return TitleExtractor.from_dict(data)
elif extractor_name == KeywordExtractor.class_name():
return KeywordExtractor.from_dict(data)
else:
raise ValueError(f"Unknown extractor name: {extractor_name}")
|
from llama_index.core.extractors.metadata_extractors import (
BaseExtractor,
KeywordExtractor,
QuestionsAnsweredExtractor,
SummaryExtractor,
TitleExtractor,
)
def load_extractor(
data: dict,
) -> BaseExtractor:
if isinstance(data, BaseExtractor):
return data
extractor_name = data.get("class_name", None)
if extractor_name is None:
raise ValueError("Extractor loading requires a class_name")
if extractor_name == SummaryExtractor.class_name():
return SummaryExtractor.from_dict(data)
elif extractor_name == QuestionsAnsweredExtractor.class_name():
return QuestionsAnsweredExtractor.from_dict(data)
elif extractor_name == TitleExtractor.class_name():
return TitleExtractor.from_dict(data)
elif extractor_name == KeywordExtractor.class_name():
return KeywordExtractor.from_dict(data)
else:
raise ValueError(f"Unknown extractor name: {extractor_name}")
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.5.3.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
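# Typical top-level usage of the public API re-exported above (illustrative only):
#
#   from datasets import load_dataset
#   squad_train = load_dataset("squad", split="train")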
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.5.2.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import FreeAnchorRetinaHead
class TestFreeAnchorRetinaHead(TestCase):
def test_free_anchor_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
anchor_head = FreeAnchorRetinaHead(num_classes=1, in_channels=1)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
positive_bag_loss = empty_gt_losses['positive_bag_loss']
negative_bag_loss = empty_gt_losses['negative_bag_loss']
self.assertGreater(negative_bag_loss.item(), 0,
'negative_bag loss should be non-zero')
self.assertEqual(
positive_bag_loss.item(), 0,
'there should be no positive_bag loss when there are no true boxes'
)
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['positive_bag_loss']
onegt_box_loss = one_gt_losses['negative_bag_loss']
self.assertGreater(onegt_cls_loss.item(), 0,
'positive bag loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'negative bag loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.data import InstanceData
from mmdet import * # noqa
from mmdet.models.dense_heads import FreeAnchorRetinaHead
class TestFreeAnchorRetinaHead(TestCase):
def test_free_anchor_head_loss(self):
"""Tests rpn head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'pad_shape': (s, s, 3),
'scale_factor': 1,
}]
anchor_head = FreeAnchorRetinaHead(num_classes=1, in_channels=1)
# Anchor head expects multiple levels of features per image
feats = (
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(anchor_head.prior_generator.strides)))
cls_scores, bbox_preds = anchor_head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
# When there is no truth, the cls loss should be nonzero but
# there should be no box loss.
positive_bag_loss = empty_gt_losses['positive_bag_loss']
negative_bag_loss = empty_gt_losses['negative_bag_loss']
self.assertGreater(negative_bag_loss.item(), 0,
'negative_bag loss should be non-zero')
self.assertEqual(
positive_bag_loss.item(), 0,
'there should be no positive_bag loss when there are no true boxes'
)
# When truth is non-empty then both cls and box loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([0])
one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,
[gt_instances], img_metas)
onegt_cls_loss = one_gt_losses['positive_bag_loss']
onegt_box_loss = one_gt_losses['negative_bag_loss']
self.assertGreater(onegt_cls_loss.item(), 0,
'positive bag loss should be non-zero')
self.assertGreater(onegt_box_loss.item(), 0,
'negative bag loss should be non-zero')
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
PromptValue --> StringPromptValue
ChatPromptValue
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain_core.example_selectors import (
LengthBasedExampleSelector,
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain_core.prompts import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
BasePromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
FewShotPromptWithTemplates,
HumanMessagePromptTemplate,
MessagesPlaceholder,
PipelinePromptTemplate,
PromptTemplate,
StringPromptTemplate,
SystemMessagePromptTemplate,
load_prompt,
)
from langchain._api import create_importer
from langchain.prompts.prompt import Prompt
if TYPE_CHECKING:
from langchain_community.example_selectors.ngram_overlap import (
NGramOverlapExampleSelector,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
MODULE_LOOKUP = {
"NGramOverlapExampleSelector": "langchain_community.example_selectors.ngram_overlap"
}
_import_attribute = create_importer(__file__, module_lookup=MODULE_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotChatMessagePromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"HumanMessagePromptTemplate",
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"MessagesPlaceholder",
"NGramOverlapExampleSelector",
"PipelinePromptTemplate",
"Prompt",
"PromptTemplate",
"SemanticSimilarityExampleSelector",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
]
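# A minimal usage sketch of the re-exported prompt classes (the template strings are
# illustrative):
#
#   prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
#   prompt.format(topic="bears")
#
#   chat_prompt = ChatPromptTemplate.from_messages(
#       [("system", "You are a helpful assistant."), ("human", "{question}")]
#   )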
|
"""**Prompt** is the input to the model.
Prompt is often constructed
from multiple components. Prompt classes and functions make constructing
and working with prompts easy.
**Class hierarchy:**
.. code-block::
BasePromptTemplate --> PipelinePromptTemplate
StringPromptTemplate --> PromptTemplate
FewShotPromptTemplate
FewShotPromptWithTemplates
BaseChatPromptTemplate --> AutoGPTPrompt
ChatPromptTemplate --> AgentScratchPadChatPromptTemplate
BaseMessagePromptTemplate --> MessagesPlaceholder
BaseStringMessagePromptTemplate --> ChatMessagePromptTemplate
HumanMessagePromptTemplate
AIMessagePromptTemplate
SystemMessagePromptTemplate
PromptValue --> StringPromptValue
ChatPromptValue
""" # noqa: E501
from typing import TYPE_CHECKING, Any
from langchain_core.example_selectors import (
LengthBasedExampleSelector,
MaxMarginalRelevanceExampleSelector,
SemanticSimilarityExampleSelector,
)
from langchain_core.prompts import (
AIMessagePromptTemplate,
BaseChatPromptTemplate,
BasePromptTemplate,
ChatMessagePromptTemplate,
ChatPromptTemplate,
FewShotChatMessagePromptTemplate,
FewShotPromptTemplate,
FewShotPromptWithTemplates,
HumanMessagePromptTemplate,
MessagesPlaceholder,
PipelinePromptTemplate,
PromptTemplate,
StringPromptTemplate,
SystemMessagePromptTemplate,
load_prompt,
)
from langchain._api import create_importer
from langchain.prompts.prompt import Prompt
if TYPE_CHECKING:
from langchain_community.example_selectors.ngram_overlap import (
NGramOverlapExampleSelector,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
MODULE_LOOKUP = {
"NGramOverlapExampleSelector": "langchain_community.example_selectors.ngram_overlap"
}
_import_attribute = create_importer(__file__, module_lookup=MODULE_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AIMessagePromptTemplate",
"BaseChatPromptTemplate",
"BasePromptTemplate",
"ChatMessagePromptTemplate",
"ChatPromptTemplate",
"FewShotPromptTemplate",
"FewShotPromptWithTemplates",
"HumanMessagePromptTemplate",
"LengthBasedExampleSelector",
"MaxMarginalRelevanceExampleSelector",
"MessagesPlaceholder",
"NGramOverlapExampleSelector",
"PipelinePromptTemplate",
"PromptTemplate",
"SemanticSimilarityExampleSelector",
"StringPromptTemplate",
"SystemMessagePromptTemplate",
"load_prompt",
"FewShotChatMessagePromptTemplate",
"Prompt",
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` objects using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
traversal_paths: Iterable[str] = ('r',),
batch_size: int = 32,
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param traversal_paths: fallback traversal path in case no traversal path is sent in the request
:param batch_size: fallback batch size in case no batch size is sent in the request
:param device: device to use for encoding. ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.batch_size = batch_size
self.traversal_paths = traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: Dict = {}, **kwargs
):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.traversal_paths),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
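# Hedged usage sketch inside a local Jina Flow (the Flow wiring below is an assumption
# and not part of this file; the spaCy model is downloaded in __init__ when
# download_data=True):
#
#   from jina import Document, DocumentArray, Flow
#   with Flow().add(uses=SpacyTextEncoder) as f:
#       f.post(
#           on='/encode',
#           inputs=DocumentArray([Document(text='hello world')]),
#           parameters={'traversal_paths': ['r'], 'batch_size': 10},
#       )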
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from typing import Dict, Iterable, Optional
import spacy
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
_EXCLUDE_COMPONENTS = [
'tagger',
'parser',
'ner',
'senter',
'lemmatizer',
'attribute_ruler',
]
class SpacyTextEncoder(Executor):
"""
:class:`SpacyTextEncoder` encodes ``Document`` objects using models offered by spaCy
"""
def __init__(
self,
model_name: str = 'en_core_web_sm',
download_data: bool = True,
default_batch_size: int = 32,
default_traversal_paths: Iterable[str] = ('r',),
device: str = 'cpu',
*args,
**kwargs,
):
"""
:param model_name: pre-trained spaCy language pipeline name
:param default_batch_size: fallback batch size in case no batch size is sent in the request
:param default_traversal_paths: fallback traversal path in case no traversal path is sent in the request
:param device: device to use for encoding ['cuda', 'cpu', 'cuda:2']
"""
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.device = device
if device.startswith('cuda'):
spacy.require_gpu()
if download_data:
subprocess.run(
['python3', '-m', 'spacy', 'download', model_name], check=True
)
self.spacy_model = spacy.load(model_name, exclude=_EXCLUDE_COMPONENTS)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the embedding
attribute of the docs.
:param docs: documents sent to the encoder. The docs must have the
``text`` attribute.
:param parameters: dictionary to define the ``traversal_path`` and the
``batch_size``. For example,
``parameters={'traversal_paths': ['r'], 'batch_size': 10}``
"""
if self.device.startswith('cuda'):
from cupy import asnumpy
if docs:
batch_size = parameters.get('batch_size', self.default_batch_size)
document_batches_generator = get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=batch_size,
needs_attr='text',
)
for document_batch in document_batches_generator:
texts = [doc.text for doc in document_batch]
for doc, spacy_doc in zip(
document_batch, self.spacy_model.pipe(texts, batch_size=batch_size)
):
if self.device.startswith('cuda'):
doc.embedding = asnumpy(spacy_doc.vector)
else:
doc.embedding = spacy_doc.vector
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
(DocumentArrayRedis, RedisConfig(n_dim=128, flush=True)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
|
import pytest
from docarray import Document
from docarray.array.memory import DocumentArrayInMemory
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.weaviate import DocumentArrayWeaviate, WeaviateConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
def test_construct_docarray(da_cls, config, start_storage):
if config:
da = da_cls(config=config)
assert len(da) == 0
da = da_cls(Document(), config=config)
assert len(da) == 1
da = da_cls([Document(), Document()], config=config)
assert len(da) == 2
da = da_cls((Document(), Document()), config=config)
assert len(da) == 2
da = da_cls((Document() for _ in range(10)), config=config)
assert len(da) == 10
else:
da = da_cls()
assert len(da) == 0
da = da_cls(Document())
assert len(da) == 1
da = da_cls([Document(), Document()])
assert len(da) == 2
da = da_cls((Document(), Document()))
assert len(da) == 2
da = da_cls((Document() for _ in range(10)))
assert len(da) == 10
if da_cls is DocumentArrayInMemory:
da1 = da_cls(da)
assert len(da1) == 10
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_singleton(da_cls, config, is_copy, start_storage):
d = Document()
if config:
da = da_cls(d, copy=is_copy, config=config)
else:
da = da_cls(d, copy=is_copy)
d.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=128)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=128)),
(DocumentArrayQdrant, QdrantConfig(n_dim=128)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_da(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
if config:
da = da_cls([d1, d2], copy=is_copy, config=config)
else:
da = da_cls([d1, d2], copy=is_copy)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArrayInMemory, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=1)),
(DocumentArrayQdrant, QdrantConfig(n_dim=1)),
(DocumentArrayElastic, ElasticConfig(n_dim=128)),
],
)
@pytest.mark.parametrize('is_copy', [True, False])
def test_docarray_copy_list(da_cls, config, is_copy, start_storage):
d1 = Document()
d2 = Document()
da = da_cls([d1, d2], copy=is_copy, config=config)
d1.id = 'hello'
if da_cls == DocumentArrayInMemory:
if is_copy:
assert da[0].id != 'hello'
else:
assert da[0].id == 'hello'
else:
assert da[0].id != 'hello'
|
import logging
import os
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
# By default, dp issues DEBUG level log.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2**bits - 1) / 2
return torch.clamp(waveform, 0, 2**bits - 1).int()
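# For example, with bits=8 a sample at -1.0 maps to label 0, 0.0 to 127 (after integer
# truncation), and 1.0 to 255.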
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2**8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
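# Hedged usage sketch for the phonemizer helper (the checkpoint filename below is an
# assumption based on DeepPhonemizer's release naming, not something defined in this
# file):
#
#   phonemizer = _load_phonemizer("en_us_cmudict_forward.pt", dl_kwargs=None)
#   phonemes = phonemizer("hello world", lang="en_us")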
|
import logging
import os
import torch
from torchaudio._internal import (
download_url_to_file,
module_utils as _mod_utils,
)
def _get_chars():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
)
def _get_phones():
return (
"_",
"-",
"!",
"'",
"(",
")",
",",
".",
":",
";",
"?",
" ",
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
)
def _to_tensor(indices):
lengths = torch.tensor([len(i) for i in indices], dtype=torch.int32)
values = [torch.tensor(i) for i in indices]
values = torch.nn.utils.rnn.pad_sequence(values, batch_first=True)
return values, lengths
def _load_phonemizer(file, dl_kwargs):
if not _mod_utils.is_module_available("dp"):
raise RuntimeError("DeepPhonemizer is not installed. Please install it.")
from dp.phonemizer import Phonemizer
# By default, dp issues DEBUG level log.
logger = logging.getLogger("dp")
orig_level = logger.level
logger.setLevel(logging.INFO)
try:
url = f"https://public-asai-dl-models.s3.eu-central-1.amazonaws.com/DeepPhonemizer/{file}"
directory = os.path.join(torch.hub.get_dir(), "checkpoints")
os.makedirs(directory, exist_ok=True)
path = os.path.join(directory, file)
if not os.path.exists(path):
dl_kwargs = {} if dl_kwargs is None else dl_kwargs
download_url_to_file(url, path, **dl_kwargs)
return Phonemizer.from_checkpoint(path)
finally:
logger.setLevel(orig_level)
def _unnormalize_waveform(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]"""
waveform = torch.clamp(waveform, -1, 1)
waveform = (waveform + 1.0) * (2 ** bits - 1) / 2
return torch.clamp(waveform, 0, 2 ** bits - 1).int()
def _get_taco_params(n_symbols):
return {
"mask_padding": False,
"n_mels": 80,
"n_frames_per_step": 1,
"symbol_embedding_dim": 512,
"encoder_embedding_dim": 512,
"encoder_n_convolution": 3,
"encoder_kernel_size": 5,
"decoder_rnn_dim": 1024,
"decoder_max_step": 2000,
"decoder_dropout": 0.1,
"decoder_early_stopping": True,
"attention_rnn_dim": 1024,
"attention_hidden_dim": 128,
"attention_location_n_filter": 32,
"attention_location_kernel_size": 31,
"attention_dropout": 0.1,
"prenet_dim": 256,
"postnet_n_convolution": 5,
"postnet_kernel_size": 5,
"postnet_embedding_dim": 512,
"gate_threshold": 0.5,
"n_symbol": n_symbols,
}
def _get_wrnn_params():
return {
"upsample_scales": [5, 5, 11],
"n_classes": 2 ** 8, # n_bits = 8
"hop_length": 275,
"n_res_block": 10,
"n_rnn": 512,
"n_fc": 512,
"kernel_size": 5,
"n_freq": 80,
"n_hidden": 128,
"n_output": 128,
}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403
from .panoptic_fusion_heads import * # noqa: F401,F403
|
from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403
from .panoptic_fusion_heads import * # noqa: F401,F403
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
|
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
# model settings
model = dict(bbox_head=dict(center_sampling=True, center_sample_radius=1.5))
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationDataType_co,
NotificationEventModel,
NotificationTypeOverride,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: (
NotificationEventModel[NotificationDataType_co]
| list[NotificationEventModel[NotificationDataType_co]]
),
user_unsub_link: str | None = None,
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url
)
# Handle the case when data is a list
template_data = data
if isinstance(data, list):
# Create a dictionary with a 'notifications' key containing the list
template_data = {"notifications": data}
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=template_data,
unsubscribe_link=f"{base_url}/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(
user_email=user_email,
user_unsubscribe_link=user_unsub_link,
subject=subject,
body=full_message,
)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
# find the template at templates/<name>.jinja2 (notification_type_override.template already includes the .html suffix)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(
self,
user_email: str,
subject: str,
body: str,
user_unsubscribe_link: str | None = None,
):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
# Headers default to None internally so this is fine
Headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
}
if user_unsubscribe_link
else None
),
)
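# Hedged usage sketch (the notification type member and event payload below are
# assumptions; real values come from backend.data.notifications):
#
#   sender = EmailSender()
#   sender.send_templated(
#       notification=NotificationType.AGENT_RUN,   # hypothetical member
#       user_email="user@example.com",
#       data=notification_event,                   # a NotificationEventModel instance
#   )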
|
import logging
import pathlib
from postmarker.core import PostmarkClient
from postmarker.models.emails import EmailManager
from prisma.enums import NotificationType
from pydantic import BaseModel
from backend.data.notifications import (
NotificationEventModel,
NotificationTypeOverride,
T_co,
)
from backend.util.settings import Settings
from backend.util.text import TextFormatter
logger = logging.getLogger(__name__)
settings = Settings()
# The following is a workaround to get the type checker to recognize the EmailManager type
# This is a temporary solution and should be removed once the Postmark library is updated
# to support type annotations.
class TypedPostmarkClient(PostmarkClient):
emails: EmailManager
class Template(BaseModel):
subject_template: str
body_template: str
base_template: str
class EmailSender:
def __init__(self):
if settings.secrets.postmark_server_api_token:
self.postmark = TypedPostmarkClient(
server_token=settings.secrets.postmark_server_api_token
)
else:
logger.warning(
"Postmark server API token not found, email sending disabled"
)
self.postmark = None
self.formatter = TextFormatter()
def send_templated(
self,
notification: NotificationType,
user_email: str,
data: NotificationEventModel[T_co] | list[NotificationEventModel[T_co]],
user_unsub_link: str | None = None,
):
"""Send an email to a user using a template pulled from the notification type"""
if not self.postmark:
logger.warning("Postmark client not initialized, email not sent")
return
template = self._get_template(notification)
base_url = (
settings.config.frontend_base_url or settings.config.platform_base_url
)
# Handle the case when data is a list
template_data = data
if isinstance(data, list):
# Create a dictionary with a 'notifications' key containing the list
template_data = {"notifications": data}
try:
subject, full_message = self.formatter.format_email(
base_template=template.base_template,
subject_template=template.subject_template,
content_template=template.body_template,
data=template_data,
unsubscribe_link=f"{base_url}/profile/settings",
)
except Exception as e:
logger.error(f"Error formatting full message: {e}")
raise e
self._send_email(
user_email=user_email,
user_unsubscribe_link=user_unsub_link,
subject=subject,
body=full_message,
)
def _get_template(self, notification: NotificationType):
# convert the notification type to a notification type override
notification_type_override = NotificationTypeOverride(notification)
# find the template at templates/<name>.jinja2 (notification_type_override.template already includes the .html suffix)
template_path = f"templates/{notification_type_override.template}.jinja2"
logger.debug(
f"Template full path: {pathlib.Path(__file__).parent / template_path}"
)
base_template_path = "templates/base.html.jinja2"
with open(pathlib.Path(__file__).parent / base_template_path, "r") as file:
base_template = file.read()
with open(pathlib.Path(__file__).parent / template_path, "r") as file:
template = file.read()
return Template(
subject_template=notification_type_override.subject,
body_template=template,
base_template=base_template,
)
def _send_email(
self,
user_email: str,
subject: str,
body: str,
user_unsubscribe_link: str | None = None,
):
if not self.postmark:
logger.warning("Email tried to send without postmark configured")
return
logger.debug(f"Sending email to {user_email} with subject {subject}")
self.postmark.emails.send(
From=settings.config.postmark_sender_email,
To=user_email,
Subject=subject,
HtmlBody=body,
# Headers default to None internally so this is fine
Headers=(
{
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
"List-Unsubscribe": f"<{user_unsubscribe_link}>",
}
if user_unsubscribe_link
else None
),
)
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
def test_speed(self):
leading_dims = (3, 2)
T = 200
waveform = torch.rand(*leading_dims, T, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.randint(1, T, leading_dims, dtype=self.dtype, device=self.device)
self._assert_consistency(F.speed, (waveform, lengths, 1000, 1.1))
|
import unittest
import torch
import torchaudio.prototype.functional as F
from torchaudio_unittest.common_utils import nested_params, TestBaseMixin, torch_script
class TorchScriptConsistencyTestImpl(TestBaseMixin):
def _assert_consistency(self, func, inputs, shape_only=False):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(device=self.device, dtype=self.dtype)
inputs_.append(i)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(*inputs_)
torch.random.manual_seed(40)
ts_output = ts_func(*inputs_)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
@nested_params(
["convolve", "fftconvolve"],
["full", "valid", "same"],
)
def test_convolve(self, fn, mode):
leading_dims = (2, 3, 2)
L_x, L_y = 32, 55
x = torch.rand(*leading_dims, L_x, dtype=self.dtype, device=self.device)
y = torch.rand(*leading_dims, L_y, dtype=self.dtype, device=self.device)
self._assert_consistency(getattr(F, fn), (x, y, mode))
def test_add_noise(self):
leading_dims = (2, 3)
L = 31
waveform = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
noise = torch.rand(*leading_dims, L, dtype=self.dtype, device=self.device, requires_grad=True)
lengths = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True)
snr = torch.rand(*leading_dims, dtype=self.dtype, device=self.device, requires_grad=True) * 10
self._assert_consistency(F.add_noise, (waveform, noise, lengths, snr))
def test_barkscale_fbanks(self):
if self.device != torch.device("cpu"):
raise unittest.SkipTest("No need to perform test on device other than CPU")
n_stft = 100
f_min = 0.0
f_max = 20.0
n_barks = 10
sample_rate = 16000
self._assert_consistency(F.barkscale_fbanks, (n_stft, f_min, f_max, n_barks, sample_rate, "traunmuller"))
def test_oscillator_bank(self):
num_frames, num_pitches, sample_rate = 8000, 8, 8000
freq = torch.rand((num_frames, num_pitches), dtype=self.dtype, device=self.device)
amps = torch.ones_like(freq)
self._assert_consistency(F.oscillator_bank, (freq, amps, sample_rate, "sum"))
def test_extend_pitch(self):
num_frames = 5
input = torch.ones((num_frames, 1), device=self.device, dtype=self.dtype)
num_pitches = 7
pattern = [i + 1.0 for i in range(num_pitches)]
self._assert_consistency(F.extend_pitch, (input, num_pitches))
self._assert_consistency(F.extend_pitch, (input, pattern))
self._assert_consistency(F.extend_pitch, (input, torch.tensor(pattern)))
def test_sinc_ir(self):
cutoff = torch.tensor([0, 0.5, 1.0], device=self.device, dtype=self.dtype)
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, False))
self._assert_consistency(F.sinc_impulse_response, (cutoff, 513, True))
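# --- Illustrative sketch (not part of the original test module) ---
# A minimal, standalone illustration of the eager-vs-scripted consistency pattern the
# tests above exercise, using torch.jit.script directly; the `scale` helper below is
# hypothetical and merely stands in for the torchaudio prototype functionals that
# `torch_script` wraps in `_assert_consistency`.
if __name__ == "__main__":
    def scale(x: torch.Tensor, gain: float) -> torch.Tensor:
        # Multiply the waveform by a scalar gain.
        return x * gain

    scripted_scale = torch.jit.script(scale)
    sample = torch.rand(3, 5)
    assert torch.equal(scale(sample, 2.0), scripted_scale(sample, 2.0))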
|
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.nebula import NebulaGraphStore
def test_nebula_graph_store():
names_of_bases = [b.__name__ for b in NebulaGraphStore.__bases__]
assert GraphStore.__name__ in names_of_bases
|
from unittest.mock import MagicMock, patch
from llama_index.core.graph_stores.types import GraphStore
from llama_index.graph_stores.nebula import NebulaGraphStore
@patch("llama_index.graph_stores.nebula.NebulaGraphStore")
def test_nebula_graph_store(MockNebulaGraphStore: MagicMock):
instance: NebulaGraphStore = MockNebulaGraphStore.return_value()
assert isinstance(instance, GraphStore)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mask2former_track_head import Mask2FormerTrackHead
from .quasi_dense_embed_head import QuasiDenseEmbedHead
from .quasi_dense_track_head import QuasiDenseTrackHead
from .roi_embed_head import RoIEmbedHead
from .roi_track_head import RoITrackHead
__all__ = [
'QuasiDenseEmbedHead', 'QuasiDenseTrackHead', 'Mask2FormerTrackHead',
'RoIEmbedHead', 'RoITrackHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .mask2former_track_head import Mask2FormerTrackHead
from .quasi_dense_embed_head import QuasiDenseEmbedHead
from .quasi_dense_track_head import QuasiDenseTrackHead
__all__ = [
'QuasiDenseEmbedHead', 'QuasiDenseTrackHead', 'Mask2FormerTrackHead'
]
|
import json
from typing import Tuple
import responses
from requests import Request
from langchain_community.document_loaders import HuggingFaceModelLoader
# Mocked model data to simulate an API response
MOCKED_MODELS_RESPONSE = [
{
"_id": "657a1fff16886e681230c05a",
"id": "microsoft/phi-2",
"likes": 2692,
"private": False,
"downloads": 546775,
"tags": [
"transformers",
"safetensors",
"phi",
"text-generation",
"nlp",
"code",
"custom_code",
"en",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us",
],
"pipeline_tag": "text-generation",
"library_name": "transformers",
"createdAt": "2023-12-13T21:19:59.000Z",
"modelId": "microsoft/phi-2",
},
# Add additional models as needed
]
# Mocked README content for models
MOCKED_README_CONTENT = {
"microsoft/phi-2": "README content for microsoft/phi-2",
"openai/gpt-3": "README content for openai/gpt-3",
}
def response_callback(request: Request) -> Tuple[int, dict, str]:
if "/api/models" in request.url:
return (200, {}, json.dumps(MOCKED_MODELS_RESPONSE))
elif "README.md" in request.url:
model_id = (
request.url.split("/")[3] + "/" + request.url.split("/")[4]
) # Extract model_id
content = MOCKED_README_CONTENT.get(model_id, "")
return (200, {}, content)
return (404, {}, "Not Found")
@responses.activate
def test_load_models_with_readme() -> None:
"""Tests loading models along with their README content."""
responses.add_callback(
responses.GET,
"https://huggingface.co/api/models",
callback=response_callback, # type: ignore[arg-type]
content_type="application/json",
)
responses.add_callback(
responses.GET,
# Use a regex or update this placeholder
"https://huggingface.co/microsoft/phi-2/raw/main/README.md",
callback=response_callback, # type: ignore[arg-type]
content_type="text/plain",
)
loader = HuggingFaceModelLoader(search="phi-2", limit=2)
docs = loader.load()
assert len(docs) == len(MOCKED_MODELS_RESPONSE)
for doc, expected_model in zip(docs, MOCKED_MODELS_RESPONSE):
id_ = expected_model["id"]
assert isinstance(id_, str)
assert doc.page_content == MOCKED_README_CONTENT[id_]
assert doc.metadata["modelId"] == expected_model["id"]
|
import json
from typing import Tuple
import responses
from requests import Request
from langchain_community.document_loaders import HuggingFaceModelLoader
# Mocked model data to simulate an API response
MOCKED_MODELS_RESPONSE = [
{
"_id": "657a1fff16886e681230c05a",
"id": "microsoft/phi-2",
"likes": 2692,
"private": False,
"downloads": 546775,
"tags": [
"transformers",
"safetensors",
"phi",
"text-generation",
"nlp",
"code",
"custom_code",
"en",
"license:mit",
"autotrain_compatible",
"endpoints_compatible",
"has_space",
"region:us",
],
"pipeline_tag": "text-generation",
"library_name": "transformers",
"createdAt": "2023-12-13T21:19:59.000Z",
"modelId": "microsoft/phi-2",
},
# Add additional models as needed
]
# Mocked README content for models
MOCKED_README_CONTENT = {
"microsoft/phi-2": "README content for microsoft/phi-2",
"openai/gpt-3": "README content for openai/gpt-3",
}
def response_callback(request: Request) -> Tuple[int, dict, str]:
if "/api/models" in request.url:
return (200, {}, json.dumps(MOCKED_MODELS_RESPONSE))
elif "README.md" in request.url:
model_id = (
request.url.split("/")[3] + "/" + request.url.split("/")[4]
) # Extract model_id
content = MOCKED_README_CONTENT.get(model_id, "")
return (200, {}, content)
return (404, {}, "Not Found")
@responses.activate
def test_load_models_with_readme() -> None:
"""Tests loading models along with their README content."""
responses.add_callback(
responses.GET,
"https://huggingface.co/api/models",
callback=response_callback, # type: ignore
content_type="application/json",
)
responses.add_callback(
responses.GET,
# Use a regex or update this placeholder
"https://huggingface.co/microsoft/phi-2/raw/main/README.md",
callback=response_callback, # type: ignore
content_type="text/plain",
)
loader = HuggingFaceModelLoader(search="phi-2", limit=2)
docs = loader.load()
assert len(docs) == len(MOCKED_MODELS_RESPONSE)
for doc, expected_model in zip(docs, MOCKED_MODELS_RESPONSE):
id_ = expected_model["id"]
assert isinstance(id_, str)
assert doc.page_content == MOCKED_README_CONTENT[id_]
assert doc.metadata["modelId"] == expected_model["id"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(
self.map_location + ':' + str(properties.get('gpu_id'))
if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
class_name: bbox_coords,
'score': score
})
return output
|
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(
self.map_location + ':' + str(properties.get('gpu_id'))
if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
class_name: bbox_coords,
'score': score
})
return output
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.redis.schema import (
FlatVectorField,
HNSWVectorField,
NumericFieldSchema,
RedisDistanceMetric,
RedisField,
RedisModel,
RedisVectorField,
TagFieldSchema,
TextFieldSchema,
read_schema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisDistanceMetric": "langchain_community.vectorstores.redis.schema",
"RedisField": "langchain_community.vectorstores.redis.schema",
"TextFieldSchema": "langchain_community.vectorstores.redis.schema",
"TagFieldSchema": "langchain_community.vectorstores.redis.schema",
"NumericFieldSchema": "langchain_community.vectorstores.redis.schema",
"RedisVectorField": "langchain_community.vectorstores.redis.schema",
"FlatVectorField": "langchain_community.vectorstores.redis.schema",
"HNSWVectorField": "langchain_community.vectorstores.redis.schema",
"RedisModel": "langchain_community.vectorstores.redis.schema",
"read_schema": "langchain_community.vectorstores.redis.schema",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FlatVectorField",
"HNSWVectorField",
"NumericFieldSchema",
"RedisDistanceMetric",
"RedisField",
"RedisModel",
"RedisVectorField",
"TagFieldSchema",
"TextFieldSchema",
"read_schema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores.redis.schema import (
FlatVectorField,
HNSWVectorField,
NumericFieldSchema,
RedisDistanceMetric,
RedisField,
RedisModel,
RedisVectorField,
TagFieldSchema,
TextFieldSchema,
read_schema,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RedisDistanceMetric": "langchain_community.vectorstores.redis.schema",
"RedisField": "langchain_community.vectorstores.redis.schema",
"TextFieldSchema": "langchain_community.vectorstores.redis.schema",
"TagFieldSchema": "langchain_community.vectorstores.redis.schema",
"NumericFieldSchema": "langchain_community.vectorstores.redis.schema",
"RedisVectorField": "langchain_community.vectorstores.redis.schema",
"FlatVectorField": "langchain_community.vectorstores.redis.schema",
"HNSWVectorField": "langchain_community.vectorstores.redis.schema",
"RedisModel": "langchain_community.vectorstores.redis.schema",
"read_schema": "langchain_community.vectorstores.redis.schema",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RedisDistanceMetric",
"RedisField",
"TextFieldSchema",
"TagFieldSchema",
"NumericFieldSchema",
"RedisVectorField",
"FlatVectorField",
"HNSWVectorField",
"RedisModel",
"read_schema",
]
|
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio":
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
If parsing fails, the block is returned unchanged.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
format = block["input_audio"].get("format")
if data and format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in ("file", "input_audio")
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
|
import re
from collections.abc import Sequence
from typing import Optional
from langchain_core.messages import BaseMessage
def _is_openai_data_block(block: dict) -> bool:
"""Check if the block contains multimodal data in OpenAI Chat Completions format."""
if block.get("type") == "image_url":
if (
(set(block.keys()) <= {"type", "image_url", "detail"})
and (image_url := block.get("image_url"))
and isinstance(image_url, dict)
):
url = image_url.get("url")
if isinstance(url, str):
return True
elif block.get("type") == "file":
if (file := block.get("file")) and isinstance(file, dict):
file_data = file.get("file_data")
if isinstance(file_data, str):
return True
elif block.get("type") == "input_audio": # noqa: SIM102
if (input_audio := block.get("input_audio")) and isinstance(input_audio, dict):
audio_data = input_audio.get("data")
audio_format = input_audio.get("format")
if isinstance(audio_data, str) and isinstance(audio_format, str):
return True
else:
return False
return False
def _parse_data_uri(uri: str) -> Optional[dict]:
"""Parse a data URI into its components. If parsing fails, return None.
Example:
.. code-block:: python
data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
parsed = _parse_data_uri(data_uri)
assert parsed == {
"source_type": "base64",
"mime_type": "image/jpeg",
"data": "/9j/4AAQSkZJRg...",
}
"""
regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
match = re.match(regex, uri)
if match is None:
return None
return {
"source_type": "base64",
"data": match.group("data"),
"mime_type": match.group("mime_type"),
}
def _convert_openai_format_to_data_block(block: dict) -> dict:
"""Convert OpenAI image content block to standard data content block.
If parsing fails, the block is returned unchanged.
Args:
block: The OpenAI image content block to convert.
Returns:
The converted standard data content block.
"""
if block["type"] == "image_url":
parsed = _parse_data_uri(block["image_url"]["url"])
if parsed is not None:
parsed["type"] = "image"
return parsed
return block
if block["type"] == "file":
parsed = _parse_data_uri(block["file"]["file_data"])
if parsed is not None:
parsed["type"] = "file"
if filename := block["file"].get("filename"):
parsed["filename"] = filename
return parsed
return block
if block["type"] == "input_audio":
data = block["input_audio"].get("data")
format = block["input_audio"].get("format")
if data and format:
return {
"type": "audio",
"source_type": "base64",
"data": data,
"mime_type": f"audio/{format}",
}
return block
return block
def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
"""Extend support for message formats.
Chat models implement support for images in OpenAI Chat Completions format, as well
as other multimodal data as standard data blocks. This function extends support to
audio and file data in OpenAI Chat Completions format by converting them to standard
data blocks.
"""
formatted_messages = []
for message in messages:
formatted_message = message
if isinstance(message.content, list):
for idx, block in enumerate(message.content):
if (
isinstance(block, dict)
# Subset to (PDF) files and audio, as most relevant chat models
# support images in OAI format (and some may not yet support the
# standard data block format)
and block.get("type") in ("file", "input_audio")
and _is_openai_data_block(block)
):
if formatted_message is message:
formatted_message = message.model_copy()
# Also shallow-copy content
formatted_message.content = list(formatted_message.content)
formatted_message.content[idx] = ( # type: ignore[index] # mypy confused by .model_copy
_convert_openai_format_to_data_block(block)
)
formatted_messages.append(formatted_message)
return formatted_messages
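# --- Illustrative sketch (not part of the original module) ---
# A minimal illustration of how the private helpers above convert an OpenAI Chat
# Completions "file" block carrying a base64 data URI into a standard data content
# block; the sample payload is hypothetical.
if __name__ == "__main__":
    sample_block = {
        "type": "file",
        "file": {
            "file_data": "data:application/pdf;base64,JVBERi0=",
            "filename": "doc.pdf",
        },
    }
    assert _is_openai_data_block(sample_block)
    converted = _convert_openai_format_to_data_block(sample_block)
    assert converted == {
        "type": "file",
        "source_type": "base64",
        "mime_type": "application/pdf",
        "data": "JVBERi0=",
        "filename": "doc.pdf",
    }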
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict, optional): Arguments to instantiate the
corresponding backend in mmdet <= 3.0.0rc6. Defaults to None.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: Optional[dict] = None,
backend_args: Optional[dict] = None,
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.backend_args = backend_args
if file_client_args is not None:
raise RuntimeError(
'The `file_client_args` is deprecated, '
'please use `backend_args` instead. Please refer to '
'https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/_base_/datasets/coco_detection.py' # noqa: E501
)
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
backend_args=self.backend_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
from typing import Optional, Sequence
from mmengine.dist import is_main_process
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.structures import InstanceData
from mmdet.registry import METRICS
@METRICS.register_module()
class DumpProposals(BaseMetric):
"""Dump proposals pseudo metric.
Args:
output_dir (str): The root directory for ``proposals_file``.
Defaults to ''.
proposals_file (str): Proposals file path. Defaults to 'proposals.pkl'.
num_max_proposals (int, optional): Maximum number of proposals to dump.
If not specified, all proposals will be dumped.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmengine.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
"""
default_prefix: Optional[str] = 'dump_proposals'
def __init__(self,
output_dir: str = '',
proposals_file: str = 'proposals.pkl',
num_max_proposals: Optional[int] = None,
file_client_args: dict = dict(backend='disk'),
collect_device: str = 'cpu',
prefix: Optional[str] = None) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
self.num_max_proposals = num_max_proposals
# TODO: update after mmengine finish refactor fileio.
self.file_client_args = file_client_args
self.output_dir = output_dir
assert proposals_file.endswith(('.pkl', '.pickle')), \
'The output file must be a pkl file.'
self.proposals_file = os.path.join(self.output_dir, proposals_file)
if is_main_process():
os.makedirs(self.output_dir, exist_ok=True)
def process(self, data_batch: Sequence[dict],
data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions. The processed
results should be stored in ``self.results``, which will be used to
compute the metrics when all batches have been processed.
Args:
data_batch (Sequence[dict]): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of data samples that
contain annotations and predictions.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
# `bboxes` is sorted by `scores`
ranked_scores, rank_inds = pred['scores'].sort(descending=True)
ranked_bboxes = pred['bboxes'][rank_inds, :]
ranked_bboxes = ranked_bboxes.cpu().numpy()
ranked_scores = ranked_scores.cpu().numpy()
pred_instance = InstanceData()
pred_instance.bboxes = ranked_bboxes
pred_instance.scores = ranked_scores
if self.num_max_proposals is not None:
pred_instance = pred_instance[:self.num_max_proposals]
img_path = data_sample['img_path']
# `file_name` is the key to obtain the proposals from the
# `proposals_list`.
file_name = osp.join(
osp.split(osp.split(img_path)[0])[-1],
osp.split(img_path)[-1])
result = {file_name: pred_instance}
self.results.append(result)
def compute_metrics(self, results: list) -> dict:
"""Dump the processed results.
Args:
results (list): The processed results of each batch.
Returns:
dict: An empty dict.
"""
logger: MMLogger = MMLogger.get_current_instance()
dump_results = {}
for result in results:
dump_results.update(result)
dump(
dump_results,
file=self.proposals_file,
file_client_args=self.file_client_args)
logger.info(f'Results are saved at {self.proposals_file}')
return {}
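# --- Illustrative config usage (not part of the original module) ---
# A minimal sketch of how this metric might be referenced from an MMDetection config
# to dump proposals during evaluation; the output paths below are hypothetical and the
# surrounding config structure depends on the mmdet version in use.
if __name__ == "__main__":
    val_evaluator = dict(
        type='DumpProposals',
        output_dir='work_dirs/proposals',
        proposals_file='rpn_proposals.pkl',
    )
    print(val_evaluator)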
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed-size ndarray of shape (n_samples, 3) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
# or
mmdoc.point_cloud.bytes = mmdoc.point_cloud.url.load_bytes()
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, AnyTensor, PointCloud3DUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
try:
import torch
torch_available = True
except ImportError:
torch_available = False
T = TypeVar('T', bound='PointCloud3D')
class PointCloud3D(BaseDocument):
"""
Document for handling point clouds for 3D data representation.
A point cloud is a representation of a 3D mesh. It is made by repeatedly and uniformly
sampling points within the surface of the 3D body. Compared to the mesh
representation, the point cloud is a fixed-size ndarray of shape (n_samples, 3) and
hence easier for deep learning algorithms to handle.
A PointCloud3D Document can contain a PointCloud3DUrl (`PointCloud3D.url`), an
AnyTensor (`PointCloud3D.tensor`), and an AnyEmbedding (`PointCloud3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import PointCloud3D
# use it directly
pc = PointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import PointCloud3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyPointCloud3D(PointCloud3D):
second_embedding: Optional[AnyEmbedding]
pc = MyPointCloud3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
pc.tensor = pc.url.load(samples=100)
model = MyEmbeddingModel()
pc.embedding = model(pc.tensor)
pc.second_embedding = model(pc.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import PointCloud3D, Text
# compose it
class MultiModalDoc(BaseDocument):
point_cloud: PointCloud3D
text: Text
mmdoc = MultiModalDoc(
point_cloud=PointCloud3D(
url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.point_cloud.tensor = mmdoc.point_cloud.url.load(samples=100)
"""
url: Optional[PointCloud3DUrl]
tensor: Optional[AnyTensor]
embedding: Optional[AnyEmbedding]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch_available and isinstance(value, torch.Tensor)
):
value = cls(tensor=value)
return super().validate(value)
|
import argparse
import urllib.request
from abc import ABC
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
@staticmethod
def is_ready(ctrl_address: str, protocol: Optional[str] = 'grpc', **kwargs) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control request needs to be sent
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
:return: True if status is ready else False.
"""
if protocol is None or protocol == 'grpc':
res = AsyncNewLoopRuntime.is_ready(ctrl_address)
else:
try:
conn = urllib.request.urlopen(url=f'http://{ctrl_address}')
res = conn.code == HTTPStatus.OK
except Exception:
res = False
return res
@classmethod
def wait_for_ready_or_shutdown(
cls,
timeout: Optional[float],
ready_or_shutdown_event: Union['multiprocessing.Event', 'threading.Event'],
ctrl_address: str,
protocol: Optional[str] = 'grpc',
**kwargs,
):
"""
Check if the runtime has successfully started
:param timeout: The time to wait before readiness or failure is determined
:param ctrl_address: the address where the control message needs to be sent
:param ready_or_shutdown_event: the multiprocessing event to detect if the process failed or is ready
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
:return: True if it is ready or needs to be shut down
"""
return super().wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=ready_or_shutdown_event,
ctrl_address=ctrl_address,
protocol=protocol,
)
|
import argparse
import urllib.request
from abc import ABC
from http import HTTPStatus
from typing import TYPE_CHECKING, Optional, Union
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
if TYPE_CHECKING:
import asyncio
import multiprocessing
import threading
class GatewayRuntime(AsyncNewLoopRuntime, ABC):
"""
The Runtime from which the GatewayRuntimes need to inherit
"""
def __init__(
self,
args: argparse.Namespace,
cancel_event: Optional[
Union['asyncio.Event', 'multiprocessing.Event', 'threading.Event']
] = None,
**kwargs,
):
# this order is intentional: The timeout is needed in _create_topology_graph(), called by super
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
super().__init__(args, cancel_event, **kwargs)
@staticmethod
def is_ready(ctrl_address: str, protocol: Optional[str] = 'grpc', **kwargs) -> bool:
"""
Check if status is ready.
:param ctrl_address: the address where the control request needs to be sent
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
:return: True if status is ready else False.
"""
if protocol is None or protocol == 'grpc':
res = super().is_ready(ctrl_address)
else:
try:
conn = urllib.request.urlopen(url=f'http://{ctrl_address}')
res = conn.code == HTTPStatus.OK
except Exception:
res = False
return res
@classmethod
def wait_for_ready_or_shutdown(
cls,
timeout: Optional[float],
ready_or_shutdown_event: Union['multiprocessing.Event', 'threading.Event'],
ctrl_address: str,
protocol: Optional[str] = 'grpc',
**kwargs,
):
"""
Check if the runtime has successfully started
:param timeout: The time to wait before readiness or failure is determined
:param ctrl_address: the address where the control message needs to be sent
:param ready_or_shutdown_event: the multiprocessing event to detect if the process failed or is ready
:param protocol: protocol of the gateway runtime
:param kwargs: extra keyword arguments
:return: True if it is ready or needs to be shut down
"""
return super().wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=ready_or_shutdown_event,
ctrl_address=ctrl_address,
protocol=protocol,
)
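# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of probing gateway readiness with the static helper above; the
# control address is hypothetical. For non-gRPC protocols the check falls back to an
# HTTP GET against the address and returns False on any connection error.
if __name__ == "__main__":
    ready = GatewayRuntime.is_ready('localhost:8080', protocol='http')
    print(f'gateway ready: {ready}')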
|
# Credit to https://github.com/openai/evals/tree/main
from langchain_core.prompts import PromptTemplate
template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:
[BEGIN DATA]
***
[Input]: {input}
***
[Submission]: {output}
***
[Criteria]: {criteria}
***
[END DATA]
Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line.""" # noqa: E501
PROMPT = PromptTemplate(
input_variables=["input", "output", "criteria"], template=template
)
template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:
[BEGIN DATA]
***
[Input]: {input}
***
[Submission]: {output}
***
[Criteria]: {criteria}
***
[Reference]: {reference}
***
[END DATA]
Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line.""" # noqa: E501
PROMPT_WITH_REFERENCES = PromptTemplate(
input_variables=["input", "output", "criteria", "reference"], template=template
)
|
# flake8: noqa
# Credit to https://github.com/openai/evals/tree/main
from langchain_core.prompts import PromptTemplate
template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:
[BEGIN DATA]
***
[Input]: {input}
***
[Submission]: {output}
***
[Criteria]: {criteria}
***
[END DATA]
Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line."""
PROMPT = PromptTemplate(
input_variables=["input", "output", "criteria"], template=template
)
template = """You are assessing a submitted answer on a given task or input based on a set of criteria. Here is the data:
[BEGIN DATA]
***
[Input]: {input}
***
[Submission]: {output}
***
[Criteria]: {criteria}
***
[Reference]: {reference}
***
[END DATA]
Does the submission meet the Criteria? First, write out in a step by step manner your reasoning about each criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print only the single character "Y" or "N" (without quotes or punctuation) on its own line corresponding to the correct answer of whether the submission meets all criteria. At the end, repeat just the letter again by itself on a new line."""
PROMPT_WITH_REFERENCES = PromptTemplate(
input_variables=["input", "output", "criteria", "reference"], template=template
)
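# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of rendering the criteria prompt defined above; the sample input,
# submission, and criteria are hypothetical.
if __name__ == "__main__":
    rendered = PROMPT.format(
        input="What is 2 + 2?",
        output="4",
        criteria="correctness: the submission must be arithmetically correct",
    )
    print(rendered)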
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='str', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='int', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
|
from dataclasses import dataclass, asdict, field
from typing import (
Union,
Dict,
Optional,
TYPE_CHECKING,
Iterable,
List,
Tuple,
)
import numpy as np
from docarray.array.storage.base.backend import BaseBackendMixin, TypeMap
from docarray.helper import dataclass_from_dict, filter_dict, _safe_cast_int
if TYPE_CHECKING:
from docarray.typing import DocumentArraySourceType, ArrayType
@dataclass
class AnnliteConfig:
n_dim: int
metric: str = 'cosine'
serialize_config: Dict = field(default_factory=dict)
data_path: Optional[str] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_connection: Optional[int] = None
columns: Optional[List[Tuple[str, str]]] = None
class BackendMixin(BaseBackendMixin):
"""Provide necessary functions to enable this storage backend."""
TYPE_MAP = {
'str': TypeMap(type='TEXT', converter=str),
'float': TypeMap(type='float', converter=float),
'int': TypeMap(type='integer', converter=_safe_cast_int),
}
def _map_embedding(self, embedding: 'ArrayType') -> 'ArrayType':
if embedding is None:
embedding = np.zeros(self.n_dim, dtype=np.float32)
elif isinstance(embedding, list):
from docarray.math.ndarray import to_numpy_array
embedding = to_numpy_array(embedding)
if embedding.ndim > 1:
embedding = np.asarray(embedding).squeeze()
return embedding
def _normalize_columns(self, columns):
columns = super()._normalize_columns(columns)
for i in range(len(columns)):
columns[i] = (
columns[i][0],
self._map_type(columns[i][1]),
)
return columns
def _ensure_unique_config(
self,
config_root: dict,
config_subindex: dict,
config_joined: dict,
subindex_name: str,
) -> dict:
import os
if 'data_path' not in config_subindex:
config_joined['data_path'] = os.path.join(
config_joined['data_path'], 'subindex_' + subindex_name
)
return config_joined
def _init_storage(
self,
_docs: Optional['DocumentArraySourceType'] = None,
config: Optional[Union[AnnliteConfig, Dict]] = None,
subindex_configs: Optional[Dict] = None,
**kwargs,
):
from docarray import Document
if not config:
raise ValueError('Config object must be specified')
elif isinstance(config, dict):
config = dataclass_from_dict(AnnliteConfig, config)
self._persist = bool(config.data_path)
if not self._persist:
from tempfile import TemporaryDirectory
config.data_path = TemporaryDirectory().name
self._config = config
self._config.columns = self._normalize_columns(self._config.columns)
config = asdict(config)
self.n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(self.n_dim, lock=False, **filter_dict(config))
super()._init_storage()
if _docs is None:
return
self.clear()
if isinstance(_docs, Iterable):
self.extend(_docs)
elif isinstance(_docs, Document):
self.append(_docs)
def __getstate__(self):
state = dict(self.__dict__)
del state['_annlite']
del state['_offsetmapping']
return state
def __setstate__(self, state):
self.__dict__ = state
config = state['_config']
config = asdict(config)
n_dim = config.pop('n_dim')
from annlite import AnnLite
self._annlite = AnnLite(n_dim, lock=False, **filter_dict(config))
def __len__(self):
return self._annlite.index_size
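# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of constructing a DocumentArray backed by this AnnLite storage
# mixin, assuming the docarray v1 `DocumentArray(storage=..., config=...)` entry
# point; the dimension and column specification are hypothetical.
if __name__ == "__main__":
    from docarray import DocumentArray

    da = DocumentArray(
        storage='annlite',
        config={'n_dim': 128, 'metric': 'cosine', 'columns': [('price', 'float')]},
    )
    print(len(da))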
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar, Optional
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[ProviderName]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(
f"Using default scopes for provider {self.PROVIDER_NAME.value}"
)
scopes = self.DEFAULT_SCOPES
return scopes
|
import logging
import time
from abc import ABC, abstractmethod
from typing import ClassVar
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
logger = logging.getLogger(__name__)
class BaseOAuthHandler(ABC):
# --8<-- [start:BaseOAuthHandler1]
PROVIDER_NAME: ClassVar[ProviderName]
DEFAULT_SCOPES: ClassVar[list[str]] = []
# --8<-- [end:BaseOAuthHandler1]
@abstractmethod
# --8<-- [start:BaseOAuthHandler2]
def __init__(self, client_id: str, client_secret: str, redirect_uri: str): ...
# --8<-- [end:BaseOAuthHandler2]
@abstractmethod
# --8<-- [start:BaseOAuthHandler3]
def get_login_url(self, scopes: list[str], state: str) -> str:
# --8<-- [end:BaseOAuthHandler3]
"""Constructs a login URL that the user can be redirected to"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler4]
def exchange_code_for_tokens(
self, code: str, scopes: list[str]
) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler4]
"""Exchanges the acquired authorization code from login for a set of tokens"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler5]
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# --8<-- [end:BaseOAuthHandler5]
"""Implements the token refresh mechanism"""
...
@abstractmethod
# --8<-- [start:BaseOAuthHandler6]
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# --8<-- [end:BaseOAuthHandler6]
"""Revokes the given token at provider,
returns False provider does not support it"""
...
def refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
if credentials.provider != self.PROVIDER_NAME:
raise ValueError(
f"{self.__class__.__name__} can not refresh tokens "
f"for other provider '{credentials.provider}'"
)
return self._refresh_tokens(credentials)
def get_access_token(self, credentials: OAuth2Credentials) -> str:
"""Returns a valid access token, refreshing it first if needed"""
if self.needs_refresh(credentials):
credentials = self.refresh_tokens(credentials)
return credentials.access_token.get_secret_value()
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
"""Indicates whether the given tokens need to be refreshed"""
return (
credentials.access_token_expires_at is not None
and credentials.access_token_expires_at < int(time.time()) + 300
)
def handle_default_scopes(self, scopes: list[str]) -> list[str]:
"""Handles the default scopes for the provider"""
# If scopes are empty, use the default scopes for the provider
if not scopes:
logger.debug(
f"Using default scopes for provider {self.PROVIDER_NAME.value}"
)
scopes = self.DEFAULT_SCOPES
return scopes
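# --- Illustrative skeleton (not part of the original module) ---
# A minimal sketch of what a concrete handler must implement on top of this base
# class; all bodies are hypothetical placeholders, and a real subclass would also set
# PROVIDER_NAME to an actual ProviderName member (omitted here because the available
# members are not shown above).
if __name__ == "__main__":
    class ExampleOAuthHandler(BaseOAuthHandler):
        def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
            self.client_id = client_id
            self.client_secret = client_secret
            self.redirect_uri = redirect_uri

        def get_login_url(self, scopes: list[str], state: str) -> str:
            raise NotImplementedError

        def exchange_code_for_tokens(
            self, code: str, scopes: list[str]
        ) -> OAuth2Credentials:
            raise NotImplementedError

        def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
            raise NotImplementedError

        def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
            return False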
|
from .objective import squim_objective_base, squim_objective_model, SquimObjective
from .subjective import squim_subjective_base, squim_subjective_model, SquimSubjective
__all__ = [
"squim_objective_base",
"squim_objective_model",
"squim_subjective_base",
"squim_subjective_model",
"SquimObjective",
"SquimSubjective",
]
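# --- Sketch (not part of the source): the factory functions re-exported above build
# un-trained model instances; the input shape and the unpacking of the three objective
# metric estimates (STOI, PESQ, SI-SDR) below are assumptions for illustration.
import torch

model = squim_objective_base()
waveform = torch.randn(1, 16000)  # placeholder: one 1-second utterance at 16 kHz
stoi_hyp, pesq_hyp, si_sdr_hyp = model(waveform)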
|
from .objective import squim_objective_base, squim_objective_model, SquimObjective
__all__ = [
"squim_objective_base",
"squim_objective_model",
"SquimObjective",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg,
init_cfg=init_cfg)
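# --- Sketch (not part of the source): a registered detector such as FSAF is normally
# constructed from a config dict through the MMDetection registry rather than
# instantiated directly. The nested backbone/neck/head configs here are abbreviated
# placeholders, not a complete, working FSAF config.
fsaf_cfg = dict(
    type='FSAF',
    backbone=dict(type='ResNet', depth=50),  # placeholder backbone config
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    bbox_head=dict(type='FSAFHead', num_classes=80, in_channels=256))
# detector = MODELS.build(fsaf_cfg)  # would build the model through the registry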
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
from dataclasses import dataclass, fields, field
from typing import Optional, Tuple, TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
from docarray.score import NamedScore
default_values = dict(value=0.0, op_name='', description='', ref_id='')
@dataclass(unsafe_hash=True)
class NamedScoreData:
_reference_ns: 'NamedScore' = field(hash=False, compare=False)
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@property
def _non_empty_fields(self) -> Tuple[str, ...]:
r = []
for f in fields(self):
f_name = f.name
if not f_name.startswith('_'):
v = getattr(self, f_name)
if v is not None:
r.append(f_name)
return tuple(r)
def _set_default_value_if_none(self, key):
if getattr(self, key) is None:
setattr(self, key, default_values[key])
|
from dataclasses import dataclass, fields, field
from typing import Optional, Tuple, TYPE_CHECKING
if TYPE_CHECKING:
from docarray.score import NamedScore
default_values = dict(value=0.0, op_name='', description='', ref_id='')
@dataclass(unsafe_hash=True)
class NamedScoreData:
_reference_ns: 'NamedScore' = field(hash=False, compare=False)
value: Optional[float] = None
op_name: Optional[str] = None
description: Optional[str] = None
ref_id: Optional[str] = None
@property
def _non_empty_fields(self) -> Tuple[str, ...]:
r = []
for f in fields(self):
f_name = f.name
if not f_name.startswith('_'):
v = getattr(self, f_name)
if v is not None:
r.append(f_name)
return tuple(r)
def _set_default_value_if_none(self, key):
if getattr(self, key) is None:
setattr(self, key, default_values[key])
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import FileClient, dump
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Number of processes used to get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must have a .pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
suffix = ann_file.split('.')[-1]
raise NotImplementedError('File name must have a csv or txt suffix, but '
f'got {suffix}')
print(f'Successfully loaded annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
print(f'Image meta file saved to: {save_path}')
if __name__ == '__main__':
main()
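# --- Sketch (not part of the source): the dumped pickle can be read back with
# mmengine's fileio helpers; the path below is just the script's default output name.
from mmengine.fileio import load

image_metas = load('validation-image-metas.pkl')
print(image_metas[0])  # e.g. {'filename': ..., 'ori_shape': (H, W, C)}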
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Number of processes used to get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
file_client = mmcv.FileClient(backend='disk')
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
assert args.out.endswith('pkl'), 'The output file name must have a .pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
suffix = ann_file.split('.')[-1]
raise NotImplementedError('File name must have a csv or txt suffix, but '
f'got {suffix}')
print(f'Successfully loaded annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
mmcv.dump(image_metas, save_path, protocol=4)
print(f'Image meta file saved to: {save_path}')
if __name__ == '__main__':
main()
|
from typing import Type
from .doc import BaseDoc
class AnyDoc(BaseDoc):
"""
AnyDoc is a Document that is not tied to any schema
"""
class Config:
_load_extra_fields_from_protobuf = True  # allow loading more than the fields defined in the schema
# TODO: document this behavior once it is confirmed to fix the problem
def __init__(self, **kwargs):
super().__init__()
self.__dict__.update(kwargs)
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDoc']:
"""
Accessing the nested python Class defined in the schema.
Could be useful for reconstruction of a Document in
serialization/deserialization.
:param field: name of the field
:return:
"""
return AnyDoc
@classmethod
def _get_field_type_array(cls, field: str) -> Type:
from docarray import DocList
return DocList
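# --- Sketch (not part of the source): because AnyDoc is not bound to a schema, it
# accepts arbitrary keyword fields; the field names below are made up for illustration.
doc = AnyDoc(text='hello', confidence=0.9)
print(doc.text, doc.confidence)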
|
from typing import Type
from .doc import BaseDoc
class AnyDoc(BaseDoc):
"""
AnyDoc is a Document that is not tied to any schema
"""
def __init__(self, **kwargs):
super().__init__()
self.__dict__.update(kwargs)
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDoc']:
"""
Accessing the nested python Class defined in the schema.
Could be useful for reconstruction of a Document in
serialization/deserialization.
:param field: name of the field
:return:
"""
return AnyDoc
|
import logging
from typing import Any, List
import requests
from llama_index.core.base.embeddings.base import BaseEmbedding
from requests.adapters import HTTPAdapter, Retry
logger = logging.getLogger(__name__)
class LLMRailsEmbedding(BaseEmbedding):
"""
LLMRails embedding models.
This class provides an interface to generate embeddings using a model deployed
in an LLMRails cluster. It requires the model_id of the model deployed in the cluster and an API key, which you can obtain
from https://console.llmrails.com/api-keys.
"""
model_id: str
api_key: str
session: requests.Session
@classmethod
def class_name(cls) -> str:
return "LLMRailsEmbedding"
def __init__(
self,
api_key: str,
model_id: str = "embedding-english-v1", # or embedding-multi-v1
**kwargs: Any,
):
retry = Retry(
total=3,
connect=3,
read=2,
allowed_methods=["POST"],
backoff_factor=2,
status_forcelist=[502, 503, 504],
)
session = requests.Session()
session.mount("https://api.llmrails.com", HTTPAdapter(max_retries=retry))
session.headers = {"X-API-KEY": api_key}
super().__init__(model_id=model_id, api_key=api_key, session=session, **kwargs)
def _get_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
response = self.session.post(
"https://api.llmrails.com/v1/embeddings",
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except requests.exceptions.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
async def _aget_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
import httpx
except ImportError:
raise ImportError(
"The httpx library is required to use the async version of "
"this function. Install it with `pip install httpx`."
)
try:
async with httpx.AsyncClient() as client:
response = await client.post(
"https://api.llmrails.com/v1/embeddings",
headers={"X-API-KEY": self.api_key},
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except httpx.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
def _get_text_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_query_embedding(self, query: str) -> List[float]:
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
async def _aget_text_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
LLMRailsEmbeddings = LLMRailsEmbedding
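# --- Sketch (not part of the source): typical use through the public BaseEmbedding
# interface; the API key and input text are placeholders.
embed_model = LLMRailsEmbedding(api_key="YOUR_LLMRAILS_API_KEY")
vector = embed_model.get_text_embedding("What does LLMRails return for this sentence?")
print(len(vector))  # dimensionality of the returned embedding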
|
import logging
from typing import Any, List
import requests
from llama_index.core.base.embeddings.base import BaseEmbedding
from requests.adapters import HTTPAdapter, Retry
logger = logging.getLogger(__name__)
class LLMRailsEmbedding(BaseEmbedding):
"""LLMRails embedding models.
This class provides an interface to generate embeddings using a model deployed
in an LLMRails cluster. It requires the model_id of the model deployed in the cluster and an API key, which you can obtain
from https://console.llmrails.com/api-keys.
"""
model_id: str
api_key: str
session: requests.Session
@classmethod
def class_name(cls) -> str:
return "LLMRailsEmbedding"
def __init__(
self,
api_key: str,
model_id: str = "embedding-english-v1", # or embedding-multi-v1
**kwargs: Any,
):
retry = Retry(
total=3,
connect=3,
read=2,
allowed_methods=["POST"],
backoff_factor=2,
status_forcelist=[502, 503, 504],
)
session = requests.Session()
session.mount("https://api.llmrails.com", HTTPAdapter(max_retries=retry))
session.headers = {"X-API-KEY": api_key}
super().__init__(model_id=model_id, api_key=api_key, session=session, **kwargs)
def _get_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
response = self.session.post(
"https://api.llmrails.com/v1/embeddings",
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except requests.exceptions.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
async def _aget_embedding(self, text: str) -> List[float]:
"""
Generate an embedding for a single query text.
Args:
text (str): The query text to generate an embedding for.
Returns:
List[float]: The embedding for the input query text.
"""
try:
import httpx
except ImportError:
raise ImportError(
"The httpx library is required to use the async version of "
"this function. Install it with `pip install httpx`."
)
try:
async with httpx.AsyncClient() as client:
response = await client.post(
"https://api.llmrails.com/v1/embeddings",
headers={"X-API-KEY": self.api_key},
json={"input": [text], "model": self.model_id},
)
response.raise_for_status()
return response.json()["data"][0]["embedding"]
except httpx.HTTPError as e:
logger.error(f"Error while embedding text {e}.")
raise ValueError(f"Unable to embed given text {e}")
def _get_text_embedding(self, text: str) -> List[float]:
return self._get_embedding(text)
def _get_query_embedding(self, query: str) -> List[float]:
return self._get_embedding(query)
async def _aget_query_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
async def _aget_text_embedding(self, query: str) -> List[float]:
return await self._aget_embedding(query)
LLMRailsEmbeddings = LLMRailsEmbedding
|