input | output
---|---
import collections
import csv
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
"""
def __init__(self, filename, separator=",", append=False):
super().__init__()
self.sep = separator
self.filename = file_utils.path_to_string(filename)
self.append = append
self.writer = None
self.keys = None
self.append_header = True
self.csv_file = None
def on_train_begin(self, logs=None):
if self.append:
if file_utils.exists(self.filename):
with file_utils.File(self.filename, "r") as f:
self.append_header = not bool(len(f.readline()))
mode = "a"
else:
mode = "w"
# ensure csv_file is None or closed before reassigning
if self.csv_file and not self.csv_file.closed:
self.csv_file.close()
self.csv_file = file_utils.File(self.filename, mode)
# Reset writer and keys
self.writer = None
self.keys = None
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, str):
return k
elif (
isinstance(k, collections.abc.Iterable)
and not is_zero_dim_ndarray
):
return f'"[{", ".join(map(str, k))}]"'
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
val_keys_found = False
for key in self.keys:
if key.startswith("val_"):
val_keys_found = True
break
if not val_keys_found and self.keys:
self.keys.extend(["val_" + k for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ["epoch"] + (self.keys or [])
self.writer = csv.DictWriter(
self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({"epoch": epoch})
row_dict.update(
(key, handle_value(logs.get(key, "NA"))) for key in self.keys
)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
if self.csv_file and not self.csv_file.closed:
self.csv_file.close()
self.writer = None
|
import collections
import csv
import numpy as np
from keras.src.api_export import keras_export
from keras.src.callbacks.callback import Callback
from keras.src.utils import file_utils
@keras_export("keras.callbacks.CSVLogger")
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
"""
def __init__(self, filename, separator=",", append=False):
super().__init__()
self.sep = separator
self.filename = file_utils.path_to_string(filename)
self.append = append
self.writer = None
self.keys = None
self.append_header = True
def on_train_begin(self, logs=None):
if self.append:
if file_utils.exists(self.filename):
with file_utils.File(self.filename, "r") as f:
self.append_header = not bool(len(f.readline()))
mode = "a"
else:
mode = "w"
self.csv_file = file_utils.File(self.filename, mode)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, str):
return k
elif (
isinstance(k, collections.abc.Iterable)
and not is_zero_dim_ndarray
):
return f'"[{", ".join(map(str, k))}]"'
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
# When validation_freq > 1, `val_` keys are not in the first epoch's logs.
# Add the `val_` keys so that they are part of the writer's fieldnames.
val_keys_found = False
for key in self.keys:
if key.startswith("val_"):
val_keys_found = True
break
if not val_keys_found:
self.keys.extend(["val_" + k for k in self.keys])
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ["epoch"] + self.keys
self.writer = csv.DictWriter(
self.csv_file, fieldnames=fieldnames, dialect=CustomDialect
)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({"epoch": epoch})
row_dict.update(
(key, handle_value(logs.get(key, "NA"))) for key in self.keys
)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
|
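The CSVLogger shown above only needs its hook methods called, so its serialization behaviour can be checked without a full training run. A minimal sketch (the file name and metric values are illustrative, not part of the dataset):

```python
import numpy as np
from keras.callbacks import CSVLogger

logger = CSVLogger("demo_training.log", separator=",", append=False)
logger.on_train_begin()
# Scalars are written as-is; 1D iterables become a quoted "[a, b]" string,
# and any `val_*` columns missing from `logs` are filled with "NA".
logger.on_epoch_end(0, logs={"loss": 0.25, "acc": np.array([0.9, 0.8])})
logger.on_train_end()
```

Opening the file afterwards should show an `epoch` column, the sorted metric columns (plus their generated `val_*` counterparts), and one data row.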
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RocksetChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RocksetChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RocksetChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import RocksetChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"RocksetChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"RocksetChatMessageHistory",
]
|
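Both versions of this shim rely on module-level `__getattr__` (PEP 562): the symbol is only importable for type checkers at analysis time, while real attribute access is forwarded, with a deprecation warning, to `langchain_community`. A rough stand-alone sketch of the same pattern, with a hypothetical `_MOVED` table standing in for `create_importer`:

```python
import importlib
import warnings
from typing import Any

# Hypothetical mapping of moved symbols to their new home modules.
_MOVED = {"RocksetChatMessageHistory": "langchain_community.chat_message_histories"}

def __getattr__(name: str) -> Any:
    if name in _MOVED:
        warnings.warn(
            f"Importing {name} from this module is deprecated; "
            f"import it from {_MOVED[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Forward the lookup to the new location.
        return getattr(importlib.import_module(_MOVED[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```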
"""Test ZhipuAI Text Embedding."""
from langchain_community.embeddings.zhipuai import ZhipuAIEmbeddings
def test_zhipuai_embedding_documents() -> None:
"""Test ZhipuAI Text Embedding for documents."""
documents = ["This is a test query1.", "This is a test query2."]
embedding = ZhipuAIEmbeddings() # type: ignore[call-arg]
res = embedding.embed_documents(documents)
assert len(res) == 2
assert len(res[0]) == 1024
def test_zhipuai_embedding_query() -> None:
"""Test ZhipuAI Text Embedding for query."""
document = "This is a test query."
embedding = ZhipuAIEmbeddings() # type: ignore[call-arg]
res = embedding.embed_query(document)
assert len(res) == 1024
def test_zhipuai_embedding_dimensions() -> None:
"""Test ZhipuAI Text Embedding for query by assigning dimensions"""
document = "This is a test query."
embedding = ZhipuAIEmbeddings(
model="embedding-3",
dimensions=2048,
) # type: ignore[call-arg]
res = embedding.embed_query(document)
assert len(res) == 2048
|
"""Test ZhipuAI Text Embedding."""
from langchain_community.embeddings.zhipuai import ZhipuAIEmbeddings
def test_zhipuai_embedding_documents() -> None:
"""Test ZhipuAI Text Embedding for documents."""
documents = ["This is a test query1.", "This is a test query2."]
embedding = ZhipuAIEmbeddings() # type: ignore[call-arg]
res = embedding.embed_documents(documents)
assert len(res) == 2 # type: ignore[arg-type]
assert len(res[0]) == 1024 # type: ignore[index]
def test_zhipuai_embedding_query() -> None:
"""Test ZhipuAI Text Embedding for query."""
document = "This is a test query."
embedding = ZhipuAIEmbeddings() # type: ignore[call-arg]
res = embedding.embed_query(document)
assert len(res) == 1024 # type: ignore[arg-type]
def test_zhipuai_embedding_dimensions() -> None:
"""Test ZhipuAI Text Embedding for query by assigning dimensions"""
document = "This is a test query."
embedding = ZhipuAIEmbeddings(
model="embedding-3",
dimensions=2048,
) # type: ignore[call-arg]
res = embedding.embed_query(document)
assert len(res) == 2048 # type: ignore[arg-type]
|
from __future__ import annotations
from typing import Optional
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.qat as nnqat
import torch.nn.functional as F
from torch.ao.nn.intrinsic.modules.fused import _FusedModule
__all__ = ["LinearReLU"]
class LinearReLU(nnqat.Linear, _FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
weight: fake quant module for weight
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
qconfig: Optional[object] = None,
) -> None:
super().__init__(in_features, out_features, bias, qconfig)
def forward(self, input: torch.Tensor) -> torch.Tensor:
return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(
cls,
mod: torch.nn.Module,
use_precomputed_fake_quant: bool = False,
) -> LinearReLU:
return super().from_float(mod, use_precomputed_fake_quant) # type: ignore[no-untyped-call,no-any-return]
def to_float(self) -> nni.LinearReLU:
linear = torch.nn.Linear(
self.in_features, self.out_features, self.bias is not None
)
linear.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
linear.bias = torch.nn.Parameter(self.bias.detach())
relu = torch.nn.ReLU()
return torch.ao.nn.intrinsic.LinearReLU(linear, relu) # type: ignore[no-untyped-call]
|
# mypy: allow-untyped-defs
import torch
import torch.ao.nn.intrinsic as nni
import torch.ao.nn.qat as nnqat
import torch.nn.functional as F
class LinearReLU(nnqat.Linear, nni._FusedModule):
r"""
A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for weight, used in
quantization aware training.
We adopt the same interface as :class:`torch.nn.Linear`.
Similar to `torch.ao.nn.intrinsic.LinearReLU`, with FakeQuantize modules initialized to
default.
Attributes:
weight: fake quant module for weight
Examples::
>>> # xdoctest: +SKIP
>>> m = nn.qat.LinearReLU(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
_FLOAT_MODULE = nni.LinearReLU # type: ignore[assignment]
def __init__(self, in_features, out_features, bias=True, qconfig=None):
super().__init__(in_features, out_features, bias, qconfig)
def forward(self, input):
return F.relu(F.linear(input, self.weight_fake_quant(self.weight), self.bias))
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
return super().from_float(mod, use_precomputed_fake_quant)
def to_float(self):
linear = torch.nn.Linear(
self.in_features, self.out_features, self.bias is not None
)
linear.weight = torch.nn.Parameter(self.weight.detach())
if self.bias is not None:
linear.bias = torch.nn.Parameter(self.bias.detach())
relu = torch.nn.ReLU()
return torch.ao.nn.intrinsic.LinearReLU(linear, relu)
|
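A hedged usage sketch for the fused QAT module above (the qconfig choice and tensor shapes are assumptions): the module can be instantiated directly with a default QAT qconfig, and the forward pass fake-quantizes the weight before applying the linear layer and ReLU.

```python
import torch
from torch.ao.nn.intrinsic.qat import LinearReLU
from torch.ao.quantization import get_default_qat_qconfig

qconfig = get_default_qat_qconfig("fbgemm")  # assumed backend choice
m = LinearReLU(20, 30, qconfig=qconfig)
x = torch.randn(128, 20)
y = m(x)                   # F.relu(F.linear(x, weight_fake_quant(W), b))
print(y.size())            # torch.Size([128, 30])
assert (y >= 0).all()      # ReLU output is non-negative
```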
from typing import Any, Dict, List, Optional, Union
from docarray.utils.query_language.lookup import (
LookupLeaf,
LookupNode,
LookupTreeElem,
Q,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}.{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
|
from typing import Dict, Any, Optional, Union, List
from docarray.utils.query_language.lookup import (
Q,
LookupNode,
LookupLeaf,
LookupTreeElem,
)
LOGICAL_OPERATORS: Dict[str, Union[str, bool]] = {
'$and': 'and',
'$or': 'or',
'$not': True,
}
COMPARISON_OPERATORS = {
'$lt': 'lt',
'$gt': 'gt',
'$lte': 'lte',
'$gte': 'gte',
'$eq': 'exact',
'$neq': 'neq',
'$exists': 'exists',
}
REGEX_OPERATORS = {'$regex': 'regex'}
ARRAY_OPERATORS = {'$size': 'size'}
MEMBERSHIP_OPERATORS = {'$in': 'in', '$nin': 'nin'}
SUPPORTED_OPERATORS = {
**COMPARISON_OPERATORS,
**ARRAY_OPERATORS,
**REGEX_OPERATORS,
**MEMBERSHIP_OPERATORS,
}
def _parse_lookups(
data: Union[Dict, List] = {}, root_node: Optional[LookupTreeElem] = None
) -> Optional[LookupTreeElem]:
if isinstance(data, dict):
for key, value in data.items():
node: Optional[LookupTreeElem] = None
if isinstance(root_node, LookupLeaf):
root = LookupNode()
root.add_child(root_node)
root_node = root
if key in LOGICAL_OPERATORS:
if key == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[key])
node = _parse_lookups(value, root_node=node)
elif key.startswith('$'):
raise ValueError(
f'The operator {key} is not supported yet,'
f' please double check the given filters!'
)
else:
if not value or not isinstance(value, dict):
raise ValueError(
'''Not a valid query. It should follow the format:
{ <field1>: { <operator1>: <value1> }, ... }
'''
)
items = list(value.items())
if len(items) == 1:
op, val = items[0]
if op in LOGICAL_OPERATORS:
if op == '$not':
node = LookupNode(negate=True)
else:
node = LookupNode(op=LOGICAL_OPERATORS[op])
node = _parse_lookups(val, root_node=node)
elif op in SUPPORTED_OPERATORS:
node = Q(**{f'{key}__{SUPPORTED_OPERATORS[op]}': val})
else:
raise ValueError(
f'The operator {op} is not supported yet, '
f'please double check the given filters!'
)
else:
node = LookupNode()
for op, val in items:
_node = _parse_lookups({key: {op: val}})
node.add_child(_node)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
elif isinstance(data, list):
for d in data:
node = _parse_lookups(d)
if root_node and node:
if isinstance(root_node, LookupNode):
root_node.add_child(node)
elif node:
root_node = node
else:
raise ValueError(f'The query is illegal: `{data}`')
return root_node
class QueryParser:
"""A class to parse dict condition to lookup query."""
def __init__(self, conditions: Union[Dict, List] = {}):
self.conditions = conditions
self.lookup_groups = _parse_lookups(self.conditions)
def evaluate(self, doc: Any) -> bool:
return self.lookup_groups.evaluate(doc) if self.lookup_groups else True
def __call__(self, doc: Any) -> bool:
return self.evaluate(doc)
|
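For reference, a hedged sketch of the MongoDB-style filter shape that `_parse_lookups` accepts (field names and values are made up): leaf conditions map a field to a comparison operator, and `$and`/`$or`/`$not` combine sub-queries.

```python
query = {
    "$and": [
        {"text": {"$regex": "^hello"}},
        {"price": {"$lte": 100}},
        {"$not": {"embedding": {"$exists": False}}},
    ]
}
parser = QueryParser(query)   # builds the lookup tree via _parse_lookups
# parser(doc) / parser.evaluate(doc) returns True only for matching documents.
```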
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
|
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import NodeExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: list[NodeExecutionResult]) -> list[dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id,
graph_version=graph_version,
data=node_input,
user_id=api_key.user_id,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_node_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
import logging
from collections import defaultdict
from typing import Annotated, Any, Dict, List, Optional, Sequence
from autogpt_libs.utils.cache import thread_cached
from fastapi import APIRouter, Body, Depends, HTTPException
from prisma.enums import AgentExecutionStatus, APIKeyPermission
from typing_extensions import TypedDict
import backend.data.block
from backend.data import execution as execution_db
from backend.data import graph as graph_db
from backend.data.api_key import APIKey
from backend.data.block import BlockInput, CompletedBlockOutput
from backend.data.execution import ExecutionResult
from backend.executor import ExecutionManager
from backend.server.external.middleware import require_permission
from backend.util.service import get_service_client
from backend.util.settings import Settings
@thread_cached
def execution_manager_client() -> ExecutionManager:
return get_service_client(ExecutionManager)
settings = Settings()
logger = logging.getLogger(__name__)
v1_router = APIRouter()
class NodeOutput(TypedDict):
key: str
value: Any
class ExecutionNode(TypedDict):
node_id: str
input: Any
output: Dict[str, Any]
class ExecutionNodeOutput(TypedDict):
node_id: str
outputs: List[NodeOutput]
class GraphExecutionResult(TypedDict):
execution_id: str
status: str
nodes: List[ExecutionNode]
output: Optional[List[Dict[str, str]]]
def get_outputs_with_names(results: List[ExecutionResult]) -> List[Dict[str, str]]:
outputs = []
for result in results:
if "output" in result.output_data:
output_value = result.output_data["output"][0]
name = result.output_data.get("name", [None])[0]
if output_value and name:
outputs.append({name: output_value})
return outputs
@v1_router.get(
path="/blocks",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.READ_BLOCK))],
)
def get_graph_blocks() -> Sequence[dict[Any, Any]]:
blocks = [block() for block in backend.data.block.get_blocks().values()]
return [b.to_dict() for b in blocks if not b.disabled]
@v1_router.post(
path="/blocks/{block_id}/execute",
tags=["blocks"],
dependencies=[Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK))],
)
def execute_graph_block(
block_id: str,
data: BlockInput,
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_BLOCK)),
) -> CompletedBlockOutput:
obj = backend.data.block.get_block(block_id)
if not obj:
raise HTTPException(status_code=404, detail=f"Block #{block_id} not found.")
output = defaultdict(list)
for name, data in obj.execute(data):
output[name].append(data)
return output
@v1_router.post(
path="/graphs/{graph_id}/execute/{graph_version}",
tags=["graphs"],
)
def execute_graph(
graph_id: str,
graph_version: int,
node_input: Annotated[dict[str, Any], Body(..., embed=True, default_factory=dict)],
api_key: APIKey = Depends(require_permission(APIKeyPermission.EXECUTE_GRAPH)),
) -> dict[str, Any]:
try:
graph_exec = execution_manager_client().add_execution(
graph_id,
graph_version=graph_version,
data=node_input,
user_id=api_key.user_id,
)
return {"id": graph_exec.graph_exec_id}
except Exception as e:
msg = str(e).encode().decode("unicode_escape")
raise HTTPException(status_code=400, detail=msg)
@v1_router.get(
path="/graphs/{graph_id}/executions/{graph_exec_id}/results",
tags=["graphs"],
)
async def get_graph_execution_results(
graph_id: str,
graph_exec_id: str,
api_key: APIKey = Depends(require_permission(APIKeyPermission.READ_GRAPH)),
) -> GraphExecutionResult:
graph = await graph_db.get_graph(graph_id, user_id=api_key.user_id)
if not graph:
raise HTTPException(status_code=404, detail=f"Graph #{graph_id} not found.")
results = await execution_db.get_execution_results(graph_exec_id)
last_result = results[-1] if results else None
execution_status = (
last_result.status if last_result else AgentExecutionStatus.INCOMPLETE
)
outputs = get_outputs_with_names(results)
return GraphExecutionResult(
execution_id=graph_exec_id,
status=execution_status,
nodes=[
ExecutionNode(
node_id=result.node_id,
input=result.input_data.get("value", result.input_data),
output={k: v for k, v in result.output_data.items()},
)
for result in results
],
output=outputs if execution_status == AgentExecutionStatus.COMPLETED else None,
)
|
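A hedged client-side sketch of driving the two graph endpoints above with `httpx`; the base URL, mount prefix, and `X-API-Key` header name are assumptions, and `my-graph-id` is a placeholder.

```python
import httpx

BASE = "http://localhost:8006/external-api/v1"   # assumed host and mount prefix
HEADERS = {"X-API-Key": "your-api-key"}          # assumed API-key header name

# `node_input` is embedded in the JSON body because of Body(..., embed=True).
run = httpx.post(
    f"{BASE}/graphs/my-graph-id/execute/1",
    json={"node_input": {}},
    headers=HEADERS,
).json()

# Fetch the results with the returned execution id.
result = httpx.get(
    f"{BASE}/graphs/my-graph-id/executions/{run['id']}/results",
    headers=HEADERS,
).json()
print(result["status"], result.get("output"))
```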
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='NASFCOSHead',
num_classes=80,
in_channels=256,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optim_wrapper = dict(
optimizer=dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='NASFCOSHead',
num_classes=80,
in_channels=256,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
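A hedged sketch of inspecting such an MMDetection config programmatically; the file path is hypothetical, and the `_base_` files must be resolvable relative to it.

```python
from mmengine.config import Config

# Hypothetical path; NAS-FCOS configs live under configs/nas_fcos/ in the mmdetection repo.
cfg = Config.fromfile("configs/nas_fcos/nas-fcos_r50-caffe_fpn_nashead-gn-head_4xb4-1x_coco.py")
print(cfg.model.type)            # 'NASFCOS'
print(cfg.model.bbox_head.type)  # 'NASFCOSHead'
```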
"""Test HuggingFace embeddings."""
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
class TestHuggingFaceEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> type[HuggingFaceEmbeddings]:
return HuggingFaceEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model_name": "sentence-transformers/all-mpnet-base-v2"}
class TestHuggingFaceEndpointEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> type[HuggingFaceEndpointEmbeddings]:
return HuggingFaceEndpointEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "sentence-transformers/all-mpnet-base-v2"}
|
"""Test HuggingFace embeddings."""
from typing import Type
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_huggingface.embeddings import (
HuggingFaceEmbeddings,
HuggingFaceEndpointEmbeddings,
)
class TestHuggingFaceEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> Type[HuggingFaceEmbeddings]:
return HuggingFaceEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model_name": "sentence-transformers/all-mpnet-base-v2"}
class TestHuggingFaceEndpointEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> Type[HuggingFaceEndpointEmbeddings]:
return HuggingFaceEndpointEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "sentence-transformers/all-mpnet-base-v2"}
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import (ImageToTensor, PackDetInputs, PackReIDInputs,
PackTrackInputs, ToTensor, Transpose)
from .frame_sampling import BaseFrameSample, UniformRefFrameSample
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals, LoadTrackAnnotations)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixScaleResize, FixShapeResize,
MinIoURandomCrop, MixUp, Mosaic, Pad,
PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, ResizeShortestEdge,
SegRescale, YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader',
'LoadTrackAnnotations', 'BaseFrameSample', 'UniformRefFrameSample',
'PackTrackInputs', 'PackReIDInputs', 'FixScaleResize', 'ResizeShortestEdge'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .augment_wrappers import AutoAugment, RandAugment
from .colorspace import (AutoContrast, Brightness, Color, ColorTransform,
Contrast, Equalize, Invert, Posterize, Sharpness,
Solarize, SolarizeAdd)
from .formatting import ImageToTensor, PackDetInputs, ToTensor, Transpose
from .geometric import (GeomTransform, Rotate, ShearX, ShearY, TranslateX,
TranslateY)
from .instaboost import InstaBoost
from .loading import (FilterAnnotations, InferencerLoader, LoadAnnotations,
LoadEmptyAnnotations, LoadImageFromNDArray,
LoadMultiChannelImageFromFiles, LoadPanopticAnnotations,
LoadProposals)
from .transforms import (Albu, CachedMixUp, CachedMosaic, CopyPaste, CutOut,
Expand, FixShapeResize, MinIoURandomCrop, MixUp,
Mosaic, Pad, PhotoMetricDistortion, RandomAffine,
RandomCenterCropPad, RandomCrop, RandomErasing,
RandomFlip, RandomShift, Resize, SegRescale,
YOLOXHSVRandomAug)
from .wrappers import MultiBranch, ProposalBroadcaster, RandomOrder
__all__ = [
'PackDetInputs', 'ToTensor', 'ImageToTensor', 'Transpose',
'LoadImageFromNDArray', 'LoadAnnotations', 'LoadPanopticAnnotations',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'Resize', 'RandomFlip',
'RandomCrop', 'SegRescale', 'MinIoURandomCrop', 'Expand',
'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',
'AutoAugment', 'CutOut', 'ShearX', 'ShearY', 'Rotate', 'Color', 'Equalize',
'Brightness', 'Contrast', 'TranslateX', 'TranslateY', 'RandomShift',
'Mosaic', 'MixUp', 'RandomAffine', 'YOLOXHSVRandomAug', 'CopyPaste',
'FilterAnnotations', 'Pad', 'GeomTransform', 'ColorTransform',
'RandAugment', 'Sharpness', 'Solarize', 'SolarizeAdd', 'Posterize',
'AutoContrast', 'Invert', 'MultiBranch', 'RandomErasing',
'LoadEmptyAnnotations', 'RandomOrder', 'CachedMosaic', 'CachedMixUp',
'FixShapeResize', 'ProposalBroadcaster', 'InferencerLoader'
]
|
from typing import Union, Iterable, Dict, List
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DocumentArrays with the same storage are considered equal if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
@staticmethod
def _parse_index_ids_from_bulk_info(
accumulated_info: List[Dict],
) -> Dict[str, List[int]]:
"""Parse ids from bulk info of failed send request to ES operation
:param accumulated_info: accumulated info of failed operation
:return: dict containing failed index ids of each operation type
"""
parsed_ids = {}
for info in accumulated_info:
for _op_type in info.keys():
if '_id' in info[_op_type]:
if _op_type not in parsed_ids:
parsed_ids[_op_type] = []
parsed_ids[_op_type].append(info[_op_type]['_id'])
return parsed_ids
def _upload_batch(self, docs: Iterable['Document']) -> List[int]:
batch = []
accumulated_info = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
accumulated_info.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
accumulated_info.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
successful_ids = self._parse_index_ids_from_bulk_info(accumulated_info)
if 'index' not in successful_ids:
return []
return successful_ids['index']
def extend(self, docs: Iterable['Document']):
docs = list(docs)
successful_indexed_ids = self._upload_batch(docs)
self._offset2ids.extend(
[_id for _id in successful_indexed_ids if _id not in self._offset2ids.ids]
)
if len(successful_indexed_ids) != len(docs):
doc_ids = [doc.id for doc in docs]
failed_index_ids = set(doc_ids) - set(successful_indexed_ids)
err_msg = f'fail to add Documents with ids: {failed_index_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
from typing import Union, Iterable, Dict
import warnings
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with Elastic as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DocumentArrays with the same storage are considered equal if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""
try:
return self._client.count(index=self._config.index_name)["count"]
except Exception:
return 0
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._doc_id_exists(x)
elif isinstance(x, Document):
return self._doc_id_exists(x.id)
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayElastic` object"""
self._save_offset2ids()
# if not self._persist:
# self._offset2ids.clear()
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _upload_batch(self, docs: Iterable['Document']):
batch = []
failed_index = []
for doc in docs:
batch.append(self._document_to_elastic(doc))
if len(batch) > self._config.batch_size:
failed_index.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
batch = []
if len(batch) > 0:
failed_index.extend(self._send_requests(batch))
self._refresh(self._config.index_name)
return failed_index
def extend(self, docs: Iterable['Document']):
docs = list(docs)
failed_index = self._upload_batch(docs)
failed_ids = [index['_id'] for index in failed_index]
self._offset2ids.extend(
[
doc.id
for doc in docs
if (doc.id not in self._offset2ids.ids) and (doc.id not in failed_ids)
]
)
if len(failed_ids) > 0:
err_msg = f'fail to add Documents with ids: {failed_ids}'
warnings.warn(err_msg)
raise IndexError(err_msg)
|
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))
train_dataloader = dict(batch_size=1, num_workers=1)
# learning policy
max_iters = 737500
param_scheduler = dict(end=max_iters, milestones=[655556, 710184])
# Before the 735001st iteration, we evaluate every 5000 iterations.
# After the 735000th iteration, we evaluate every 737500 iterations,
# which means that we only evaluate at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
train_cfg = dict(
max_iters=max_iters,
val_interval=interval,
dynamic_intervals=dynamic_intervals)
|
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth' # noqa
model = dict(
backbone=dict(
embed_dims=192,
num_heads=[6, 12, 24, 48],
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))
data = dict(samples_per_gpu=1, workers_per_gpu=1)
lr_config = dict(step=[655556, 710184])
max_iters = 737500
runner = dict(type='IterBasedRunner', max_iters=max_iters)
# Before the 735001st iteration, we evaluate every 5000 iterations.
# After the 735000th iteration, we evaluate every 737500 iterations,
# which means that we only evaluate at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
evaluation = dict(
interval=interval,
dynamic_intervals=dynamic_intervals,
metric=['PQ', 'bbox', 'segm'])
|
from llama_index_instrumentation.event_handlers.base import BaseEventHandler
from llama_index_instrumentation.event_handlers.null import NullEventHandler
__all__ = ["BaseEventHandler", "NullEventHandler"]
|
from llama_index.core.instrumentation.event_handlers.base import BaseEventHandler
from llama_index.core.instrumentation.event_handlers.null import NullEventHandler
__all__ = ["BaseEventHandler", "NullEventHandler"]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa
model = dict(
backbone=dict(
deepen_factor=0.33,
widen_factor=0.5,
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)),
neck=dict(in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1),
bbox_head=dict(in_channels=128, feat_channels=128, exp_on_reg=False))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='PackDetInputs')
]
train_pipeline_stage2 = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.5, 2.0),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
|
"""Standard LangChain interface tests."""
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_perplexity import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatPerplexity
@property
def chat_model_params(self) -> dict:
return {"model": "sonar"}
@pytest.mark.xfail(reason="TODO: handle in integration.")
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
super().test_double_messages_conversation(model)
@pytest.mark.xfail(reason="Raises 400: Custom stop words not supported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
"""Standard LangChain interface tests."""
from typing import Type
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_tests.integration_tests import ChatModelIntegrationTests
from langchain_perplexity import ChatPerplexity
class TestPerplexityStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> Type[BaseChatModel]:
return ChatPerplexity
@property
def chat_model_params(self) -> dict:
return {"model": "sonar"}
@pytest.mark.xfail(reason="TODO: handle in integration.")
def test_double_messages_conversation(self, model: BaseChatModel) -> None:
super().test_double_messages_conversation(model)
@pytest.mark.xfail(reason="Raises 400: Custom stop words not supported.")
def test_stop_sequence(self, model: BaseChatModel) -> None:
super().test_stop_sequence(model)
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
import mmcv
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out-file', default=None, help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# register all modules in mmdet into the registries
register_all_modules()
# TODO: Support inference of image directory.
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = {
'CLASSES': model.CLASSES,
'PALETTE': args.palette
}
# test a single image
result = inference_detector(model, args.img)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result,
show=True,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = {
'CLASSES': model.CLASSES,
'PALETTE': args.palette
}
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
visualizer.add_datasample(
'result',
img,
pred_sample=result[0],
show=True,
wait_time=0,
out_file=args.out_file,
pred_score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('--out-file', default=None, help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr,
out_file=args.out_file)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr,
out_file=args.out_file)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_california_housing(return_X_y=True)
# Make sure the number of threads is balanced.
xgb_model = xgb.XGBRegressor(
n_jobs=multiprocessing.cpu_count() // 2, tree_method="hist"
)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4, 6], "n_estimators": [50, 100, 200]},
verbose=1,
n_jobs=2,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
|
"""
Demo for using xgboost with sklearn
===================================
"""
import multiprocessing
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import GridSearchCV
import xgboost as xgb
if __name__ == "__main__":
print("Parallel Parameter optimization")
X, y = fetch_california_housing(return_X_y=True)
xgb_model = xgb.XGBRegressor(
n_jobs=multiprocessing.cpu_count() // 2, tree_method="hist"
)
clf = GridSearchCV(
xgb_model,
{"max_depth": [2, 4, 6], "n_estimators": [50, 100, 200]},
verbose=1,
n_jobs=2,
)
clf.fit(X, y)
print(clf.best_score_)
print(clf.best_params_)
|
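The "balanced" threads comment in the first version refers to oversubscription: the effective thread count is roughly the product of the estimator's `n_jobs` and the search's `n_jobs`. A small illustrative check (the numbers depend on the machine):

```python
import multiprocessing

cores = multiprocessing.cpu_count()
estimator_threads = cores // 2   # n_jobs passed to XGBRegressor above
search_workers = 2               # n_jobs passed to GridSearchCV above
total = estimator_threads * search_workers
print(f"{total} worker threads across {cores} cores")  # aim to keep total near the core count
```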
"""
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel, Field
class ExecPythonInput(BaseModel):
code: str = Field(description="the Python code to execute")
class ExecPython(BaseTool): # type: ignore[override, override]
"""Riza Code tool.
Setup:
Install ``langchain-community`` and ``rizaio`` and set environment variable ``RIZA_API_KEY``.
.. code-block:: bash
pip install -U langchain-community rizaio
export RIZA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
from langchain_community.tools.riza.command import ExecPython
tool = ExecPython()
Invocation with args:
.. code-block:: python
tool.invoke("x = 5; print(x)")
.. code-block:: python
'5\\n'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
""" # noqa: E501
name: str = "riza_exec_python"
description: str = """Execute Python code to solve problems.
The Python runtime does not have filesystem access. You can use the httpx
or requests library to make HTTP requests. Always print output to stdout."""
args_schema: Type[BaseModel] = ExecPythonInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id, language="python", code=code
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
class ExecJavaScriptInput(BaseModel):
code: str = Field(description="the JavaScript code to execute")
class ExecJavaScript(BaseTool): # type: ignore[override, override]
"""A tool implementation to execute JavaScript via Riza's Code Interpreter API."""
name: str = "riza_exec_javascript"
description: str = """Execute JavaScript code to solve problems.
The JavaScript runtime does not have filesystem access, but can use fetch
to make HTTP requests and does include the global JSON object. Always print
output to stdout."""
args_schema: Type[BaseModel] = ExecJavaScriptInput
handle_tool_error: bool = True
client: Any = None
runtime_revision_id: Optional[str] = None
def __init__(
self, runtime_revision_id: Optional[str] = None, **kwargs: Any
) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
self.runtime_revision_id = runtime_revision_id
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(
runtime_revision_id=self.runtime_revision_id,
language="javascript",
code=code,
)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
|
"""
Tool implementations for the Riza (https://riza.io) code interpreter API.
Documentation: https://docs.riza.io
API keys: https://dashboard.riza.io
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, ToolException
from pydantic import BaseModel, Field
class ExecPythonInput(BaseModel):
code: str = Field(description="the Python code to execute")
class ExecPython(BaseTool): # type: ignore[override, override]
"""Riza Code tool.
Setup:
Install ``langchain-community`` and ``rizaio`` and set environment variable ``RIZA_API_KEY``.
.. code-block:: bash
pip install -U langchain-community rizaio
export RIZA_API_KEY="your-api-key"
Instantiation:
.. code-block:: python
from langchain_community.tools.riza.command import ExecPython
tool = ExecPython()
Invocation with args:
.. code-block:: python
tool.invoke("x = 5; print(x)")
.. code-block:: python
'5\\n'
Invocation with ToolCall:
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
.. code-block:: python
tool.invoke({"args": {"code":"x = 5; print(x)"}, "id": "1", "name": tool.name, "type": "tool_call"})
""" # noqa: E501
name: str = "riza_exec_python"
description: str = """Execute Python code to solve problems.
The Python runtime does not have filesystem access. You can use the httpx
or requests library to make HTTP requests. Always print output to stdout."""
args_schema: Type[BaseModel] = ExecPythonInput
handle_tool_error: bool = True
client: Any = None
def __init__(self, **kwargs: Any) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(language="PYTHON", code=code)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
class ExecJavaScriptInput(BaseModel):
code: str = Field(description="the JavaScript code to execute")
class ExecJavaScript(BaseTool): # type: ignore[override, override]
"""A tool implementation to execute JavaScript via Riza's Code Interpreter API."""
name: str = "riza_exec_javascript"
description: str = """Execute JavaScript code to solve problems.
The JavaScript runtime does not have filesystem access, but can use fetch
to make HTTP requests and does include the global JSON object. Always print
output to stdout."""
args_schema: Type[BaseModel] = ExecJavaScriptInput
handle_tool_error: bool = True
client: Any = None
def __init__(self, **kwargs: Any) -> None:
try:
from rizaio import Riza
except ImportError as e:
raise ImportError(
"Couldn't import the `rizaio` package. "
"Try running `pip install rizaio`."
) from e
super().__init__(**kwargs)
self.client = Riza()
def _run(
self, code: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
output = self.client.command.exec(language="JAVASCRIPT", code=code)
if output.exit_code > 0:
raise ToolException(
f"Riza code execution returned a non-zero exit code. "
f"The output captured from stderr was:\n{output.stderr}"
)
return output.stdout
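# A minimal usage sketch based on the docstrings above; running it requires the
# `rizaio` package and a valid RIZA_API_KEY in the environment.
if __name__ == "__main__":
    python_tool = ExecPython()
    print(python_tool.invoke("x = 5; print(x)"))  # expected output: '5\n'
    js_tool = ExecJavaScript()
    print(js_tool.invoke({"code": "console.log(1 + 1)"}))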
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the student SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the teacher SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
|
from datasets import Dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseEncoderTrainer,
SparseMarginMSELoss,
SpladePooling,
)
# Initialize the student SPLADE model
student_model_name = "prithivida/Splade_PP_en_v1"
student_model = SparseEncoder(
modules=[
MLMTransformer(student_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Initialize the teacher SPLADE model
teacher_model_name = "naver/splade-cocondenser-ensembledistil"
teacher_model = SparseEncoder(
modules=[
MLMTransformer(teacher_model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create a small toy dataset
train_dataset = Dataset.from_dict(
{
"query": ["It's nice weather outside today.", "He drove to work."],
"passage1": ["It's so sunny.", "He took the car to work."],
"passage2": ["It's very sunny.", "She walked to the store."],
}
)
def compute_labels(batch):
emb_queries = teacher_model.encode(batch["query"])
emb_passages1 = teacher_model.encode(batch["passage1"])
emb_passages2 = teacher_model.encode(batch["passage2"])
return {
"label": teacher_model.similarity_pairwise(emb_queries, emb_passages1)
- teacher_model.similarity_pairwise(emb_queries, emb_passages2)
}
train_dataset = train_dataset.map(compute_labels, batched=True)
# In this example, the labels become -0.036 and 0.68, respectively
loss = SparseMarginMSELoss(student_model)
trainer = SparseEncoderTrainer(model=student_model, train_dataset=train_dataset, loss=loss)
trainer.train()
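# A minimal follow-up sketch (assuming the training above completed): encode a
# query/passage pair with the distilled student and score it with the same
# pairwise similarity used to build the teacher labels.
query_emb = student_model.encode(["It's nice weather outside today."])
passage_emb = student_model.encode(["It's so sunny."])
print(student_model.similarity_pairwise(query_emb, passage_emb))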
|
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
get_wav_data,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
from .common import load_params
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various effects
The purpose of this test suite is to verify that sox_effect functionalities do not exhibit
abnormal behaviors.
    This test suite should be able to run without any additional tools (such as the sox command);
    however, without such tools, the correctness of each function cannot be verified.
"""
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
"""`apply_effects_tensor` should not crash"""
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
original = get_sinusoid(frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32")
_found, _sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
_found, _sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first
)
|
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
get_wav_data,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
from .common import load_params
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various effects
The purpose of this test suite is to verify that sox_effect functionalities do not exhibit
abnormal behaviors.
    This test suite should be able to run without any additional tools (such as the sox command);
    however, without such tools, the correctness of each function cannot be verified.
"""
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
"""`apply_effects_tensor` should not crash"""
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
original = get_sinusoid(frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32")
_found, _sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
_found, _sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first
)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_fileobj(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
with open(input_path, "rb") as fileobj:
_found, _sr = sox_effects.apply_effects_file(
fileobj, effects, normalize=False, channels_first=channels_first
)
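# A standalone sketch of calling `apply_effects_tensor` outside the test class;
# it assumes a torchaudio build with sox support and uses a random waveform.
if __name__ == "__main__":
    import torch
    waveform = torch.rand(2, 8000)  # (channels, samples) at 8 kHz
    out, out_sr = sox_effects.apply_effects_tensor(
        waveform, 8000, [["gain", "-3"], ["rate", "16000"]]
    )
    print(out.shape, out_sr)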
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
NUM_BYTES_FOR_HEADER_LEN = 8
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
    # this uses the same logic as the HF code base
# https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
# and follows their documentation on how their files are serialized
# https://huggingface.co/docs/safetensors/index#format
header_len_bytes = file_bytes.read(NUM_BYTES_FOR_HEADER_LEN)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + NUM_BYTES_FOR_HEADER_LEN)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
    # this uses the same logic as the HF code base
# https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
# and follows their documentation on how their files are serialized
# https://huggingface.co/docs/safetensors/index#format
num_bytes_for_header_len = 8
header_len_bytes = file_bytes.read(num_bytes_for_header_len)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + num_bytes_for_header_len)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
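# A minimal sketch of reading a safetensors header with the helpers above; the
# file name comes from `_gen_file_name` and is assumed to exist locally.
if __name__ == "__main__":
    file_name = _gen_file_name(1, 1)  # "model-00001-of-00001.safetensors"
    with open(file_name, "rb") as f:
        header, data_start = _get_safetensors_file_metadata(f)
    print(f"tensor data starts at byte offset {data_start}")
    print(_get_dcp_custom_metadata(header))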
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Example:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras.layers.UpSampling1D(size=2)(x)
>>> y
[[[ 0. 1. 2.]
[ 0. 1. 2.]
[ 3. 4. 5.]
[ 3. 4. 5.]]
[[ 6. 7. 8.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 9. 10. 11.]]]
Args:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super().__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
size = (
self.size * input_shape[1] if input_shape[1] is not None else None
)
return [input_shape[0], size, input_shape[2]]
def call(self, inputs):
return ops.repeat(x=inputs, repeats=self.size, axis=1)
def get_config(self):
config = {"size": self.size}
base_config = super().get_config()
return {**base_config, **config}
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UpSampling1D")
class UpSampling1D(Layer):
"""Upsampling layer for 1D inputs.
Repeats each temporal step `size` times along the time axis.
Example:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> x
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = keras.layers.UpSampling1D(size=2)(x)
>>> y
[[[ 0. 1. 2.]
[ 0. 1. 2.]
[ 3. 4. 5.]
[ 3. 4. 5.]]
[[ 6. 7. 8.]
[ 6. 7. 8.]
[ 9. 10. 11.]
[ 9. 10. 11.]]]
Args:
size: Integer. Upsampling factor.
Input shape:
3D tensor with shape: `(batch_size, steps, features)`.
Output shape:
3D tensor with shape: `(batch_size, upsampled_steps, features)`.
"""
def __init__(self, size=2, **kwargs):
super().__init__(**kwargs)
self.size = int(size)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
size = (
self.size * input_shape[1] if input_shape[1] is not None else None
)
return [input_shape[0], size, input_shape[2]]
def call(self, inputs):
return ops.repeat(x=inputs, repeats=self.size, axis=1)
def get_config(self):
config = {"size": self.size}
base_config = super().get_config()
return {**base_config, **config}
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640) # height, width
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # To align with the official implementation, the score threshold is 0.01
    # for the val phase and 0.001 for the test phase.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not considered here but in the
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
    # The evaluation interval is `interval` when the current epoch is
    # less than 'max_epochs - num_last_epochs'.
    # The evaluation interval is 1 when the current epoch is greater than
    # or equal to 'max_epochs - num_last_epochs'.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
|
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py']
img_scale = (640, 640)
# model settings
model = dict(
type='YOLOX',
input_size=img_scale,
random_size_range=(15, 25),
random_size_interval=10,
backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5),
neck=dict(
type='YOLOXPAFPN',
in_channels=[128, 256, 512],
out_channels=128,
num_csp_blocks=1),
bbox_head=dict(
type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128),
train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)),
    # To align with the official implementation, the score threshold is 0.01
    # for the val phase and 0.001 for the test phase.
test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65)))
# dataset settings
data_root = 'data/coco/'
dataset_type = 'CocoDataset'
train_pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', flip_ratio=0.5),
# According to the official implementation, multi-scale
# training is not considered here but in the
# 'mmdet/models/detectors/yolox.py'.
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(
type='Pad',
pad_to_square=True,
# If the image is three-channel, the pad value needs
# to be set separately for each channel.
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=train_pipeline)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(
type='Pad',
pad_to_square=True,
pad_val=dict(img=(114.0, 114.0, 114.0))),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
persistent_workers=True,
train=train_dataset,
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
# default 8 gpu
optimizer = dict(
type='SGD',
lr=0.01,
momentum=0.9,
weight_decay=5e-4,
nesterov=True,
paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
max_epochs = 300
num_last_epochs = 15
resume_from = None
interval = 10
# learning policy
lr_config = dict(
_delete_=True,
policy='YOLOX',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=num_last_epochs,
min_lr_ratio=0.05)
runner = dict(type='EpochBasedRunner', max_epochs=max_epochs)
custom_hooks = [
dict(
type='YOLOXModeSwitchHook',
num_last_epochs=num_last_epochs,
priority=48),
dict(
type='SyncNormHook',
num_last_epochs=num_last_epochs,
interval=interval,
priority=48),
dict(
type='ExpMomentumEMAHook',
resume_from=resume_from,
momentum=0.0001,
priority=49)
]
checkpoint_config = dict(interval=interval)
evaluation = dict(
save_best='auto',
    # The evaluation interval is `interval` when the current epoch is
    # less than 'max_epochs - num_last_epochs'.
    # The evaluation interval is 1 when the current epoch is greater than
    # or equal to 'max_epochs - num_last_epochs'.
interval=interval,
dynamic_intervals=[(max_epochs - num_last_epochs, 1)],
metric='bbox')
log_config = dict(interval=50)
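# A minimal sketch (guarded so it never runs during config loading) of
# inspecting this config programmatically with the mmcv 1.x Config API;
# the path below is a placeholder for wherever this file is stored.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config.fromfile('configs/yolox/yolox_config.py')  # placeholder path
    print(cfg.model.type, cfg.runner.max_epochs, cfg.optimizer.lr)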
|
import warnings
from abc import abstractmethod
from typing import Iterable, Iterator, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
value = DocumentArray(value)
if getattr(da, '_config', None) and da._config.root_id:
for v in value:
for doc in DocumentArray(v)[selector]:
doc.tags['_root_id_'] = v.id
docs_selector = value[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
        :param kwargs: Additional arguments that are passed to the Document Store. This has no effect for an in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
from docarray.helper import check_root_id
if self._is_subindex:
check_root_id(self, values)
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
docs_selector = DocumentArray(value)[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document', **kwargs):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
        :param kwargs: Additional arguments that are passed to the Document Store. This has no effect for an in-memory DocumentArray.
"""
self._set_doc_by_id(value.id, value, **kwargs)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port(), random_port()], protocol=['grpc', 'http', 'websocket'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
port_monitoring = random_port()
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(
port=ports, protocol=protocols, monitoring=True, port_monitoring=port_monitoring
)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
resp = req.get(f'http://localhost:{port_monitoring}/')
assert resp.status_code == 200
assert (
'jina_successful_requests_total{runtime_name="gateway/rep-0"} 3.0'
in str(resp.content)
)
def test_flow_multiprotocol_with_tracing():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, tracing=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
|
import itertools
import os.path
import pytest
import requests as req
from docarray import Document, DocumentArray
from jina import Client, Executor, Flow, requests
from jina.helper import random_port
PROTOCOLS = ['grpc', 'http', 'websocket']
cur_dir = os.path.dirname(__file__)
class MyExecutor(Executor):
@requests
def foo(self, docs: DocumentArray, **kwargs):
for doc in docs:
doc.text = 'processed'
@pytest.mark.parametrize(
'ports,protocols',
[
*[
([random_port(), random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=3)
],
*[
([random_port(), random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=2)
],
*[
([random_port()], list(protocols))
for protocols in itertools.combinations(PROTOCOLS, r=1)
],
],
)
def test_flow_multiprotocol(ports, protocols):
flow = Flow().config_gateway(port=ports, protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
list(protocols)
for protocols in itertools.chain(
itertools.combinations(PROTOCOLS, r=3),
itertools.combinations(PROTOCOLS, r=2),
)
],
)
def test_flow_multiprotocol_default_random_ports(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(flow.port, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
@pytest.mark.parametrize(
'protocols',
[
['grpc'],
['http'],
['websocket'],
],
)
def test_flow_single_protocol_default_random_port(protocols):
flow = Flow().config_gateway(protocol=protocols).add(uses=MyExecutor)
with flow:
for protocol in protocols:
client = Client(port=flow.port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_aliases():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(ports=ports, protocols=protocols).add(uses=MyExecutor)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
docs = client.post('/', inputs=[Document()])
for doc in docs:
assert doc.text == 'processed'
def test_flow_multiprotocol_yaml():
flow = Flow.load_config(os.path.join(cur_dir, 'yaml/multi-protocol.yml'))
with flow:
for port, protocol in zip([12345, 12344, 12343], ['grpc', 'http', 'websocket']):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
def test_flow_multiprotocol_ports_protocols_mismatch():
flow = Flow().config_gateway(port=[random_port()], protocol=['grpc', 'http'])
with pytest.raises(ValueError) as err_info:
with flow:
pass
assert (
'You need to specify as much protocols as ports if you want to use a jina built-in gateway'
in err_info.value.args[0]
)
def test_flow_multiprotocol_with_monitoring():
port_monitoring = random_port()
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(
port=ports, protocol=protocols, monitoring=True, port_monitoring=port_monitoring
)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
resp = req.get(f'http://localhost:{port_monitoring}/')
assert resp.status_code == 200
assert (
'jina_successful_requests_total{runtime_name="gateway/rep-0"} 3.0'
in str(resp.content)
)
def test_flow_multiprotocol_with_tracing():
ports = [random_port(), random_port(), random_port()]
protocols = PROTOCOLS
flow = Flow().config_gateway(port=ports, protocol=protocols, tracing=True)
with flow:
for port, protocol in zip(ports, protocols):
client = Client(port=port, protocol=protocol)
client.post('/', inputs=[Document()])
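# A minimal standalone sketch mirroring the tests above: serve one Flow on two
# protocols at once and query each endpoint with a matching Client.
if __name__ == '__main__':
    sketch_ports = [random_port(), random_port()]
    sketch_protocols = ['grpc', 'http']
    with Flow().config_gateway(
        port=sketch_ports, protocol=sketch_protocols
    ).add(uses=MyExecutor) as f:
        for port, protocol in zip(sketch_ports, sketch_protocols):
            docs = Client(port=port, protocol=protocol).post('/', inputs=[Document()])
            print(protocol, docs[0].text)  # 'processed'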
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
|
import logging
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseTripletEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load triplets from the AllNLI dataset
# The dataset contains triplets of (anchor, positive, negative) sentences
dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev[:1000]")
# Initialize the SparseTripletEvaluator
evaluator = SparseTripletEvaluator(
anchors=dataset[:1000]["anchor"],
positives=dataset[:1000]["positive"],
negatives=dataset[:1000]["negative"],
name="all_nli_dev",
batch_size=32,
show_progress_bar=True,
)
# Run the evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from rich.console import Console
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import PlotMixin, ProtoMixin
from docarray.typing import ID
_console: Console = Console()
class BaseDocument(BaseModel, PlotMixin, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested Python class defined in the schema. Could be useful for
        reconstructing a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
def __str__(self):
with _console.capture() as capture:
_console.print(self)
return capture.get().strip()
|
import os
from typing import Type
import orjson
from pydantic import BaseModel, Field, parse_obj_as
from docarray.base_document.abstract_document import AbstractDocument
from docarray.base_document.base_node import BaseNode
from docarray.base_document.io.json import orjson_dumps, orjson_dumps_and_decode
from docarray.base_document.mixins import ProtoMixin
from docarray.typing import ID
class BaseDocument(BaseModel, ProtoMixin, AbstractDocument, BaseNode):
"""
The base class for Document
"""
id: ID = Field(default_factory=lambda: parse_obj_as(ID, os.urandom(16).hex()))
class Config:
json_loads = orjson.loads
json_dumps = orjson_dumps_and_decode
json_encoders = {dict: orjson_dumps}
validate_assignment = True
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested Python class defined in the schema. Could be useful for
        reconstructing a Document during serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].outer_type_
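# A minimal sketch of subclassing BaseDocument; only builtin field types are
# used, so it does not depend on any optional docarray typing extras.
if __name__ == '__main__':
    class TextDoc(BaseDocument):
        text: str = ''
    doc = TextDoc(text='hello')
    print(doc.id, TextDoc._get_field_type('text'))  # random hex id, <class 'str'>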
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
batch_size: int = 2048,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
        :param path_vectorizer: path to the pre-trained sklearn tf-idf vectorizer
        :param traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.batch_size = batch_size
self.traversal_paths = traversal_paths
if os.path.exists(self.path_vectorizer):
self.tfidf_vectorizer = pickle.load(open(self.path_vectorizer, 'rb'))
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs is None:
return
document_batches_generator = docs.traverse_flat(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
            filter_fn=lambda doc: len(doc.text) > 0,
).batch(
batch_size=parameters.get('batch_size', self.batch_size),
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
|
import os
import pickle
from pathlib import Path
from typing import Optional, Tuple
from jina import DocumentArray, Executor, requests
from jina.excepts import PretrainedModelFileDoesNotExist
class TFIDFTextEncoder(Executor):
"""
Encode text into tf-idf sparse embeddings
"""
def __init__(
self,
path_vectorizer: Optional[str] = None,
batch_size: int = 2048,
traversal_paths: Tuple[str] = ('r',),
*args,
**kwargs,
):
"""
        :param path_vectorizer: path to the pre-trained sklearn tf-idf vectorizer
        :param traversal_paths: fallback traversal paths in case no traversal paths are sent in the request
        :param batch_size: fallback batch size in case no batch size is sent in the request
"""
super().__init__(*args, **kwargs)
if path_vectorizer is None:
path_vectorizer = str(
Path(__file__).parent / 'model/tfidf_vectorizer.pickle'
)
self.path_vectorizer = path_vectorizer
self.batch_size = batch_size
self.traversal_paths = traversal_paths
if os.path.exists(self.path_vectorizer):
self.tfidf_vectorizer = pickle.load(open(self.path_vectorizer, 'rb'))
else:
raise PretrainedModelFileDoesNotExist(
f'{self.path_vectorizer} not found, cannot find a fitted tfidf_vectorizer'
)
@requests
def encode(
self, docs: Optional[DocumentArray] = None, parameters: dict = {}, **kwargs
):
"""
Generate the TF-IDF feature vector for all text documents.
:param docs: documents sent to the encoder. The docs must have `text`.
By default, the input `text` must be a `list` of `str`.
:param parameters: dictionary to define the `traversal_paths` and the
`batch_size`. For example,
`parameters={'traversal_paths': ['r'], 'batch_size': 10}`.
"""
if docs is None:
return
document_batches_generator = docs.batch(
traversal_paths=parameters.get('traversal_paths', self.traversal_paths),
batch_size=parameters.get('batch_size', self.batch_size),
require_attr='text',
)
for document_batch in document_batches_generator:
iterable_of_texts = [d.text for d in document_batch]
embedding_matrix = self.tfidf_vectorizer.transform(iterable_of_texts)
for doc, doc_embedding in zip(document_batch, embedding_matrix):
doc.embedding = doc_embedding
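# A minimal sketch of serving this executor in a Flow; it assumes a fitted
# sklearn TfidfVectorizer pickle exists at the default model path.
if __name__ == '__main__':
    from jina import Document, Flow
    with Flow().add(uses=TFIDFTextEncoder) as f:
        results = f.post('/', inputs=DocumentArray([Document(text='hello world')]))
        print(results[0].embedding.shape)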
|
__version__ = '0.13.26'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.25'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDoc):
d: SimpleDoc
tens: NdArray[50]
def test_persist_and_restore(tmp_path):
query = SimpleDoc(tens=np.random.random((10,)))
# create index
store = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
store.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(10)])
assert store.num_docs() == 10
find_results_before = store.find(query, search_field='tens', limit=5)
# delete and restore
del store
store = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert store.num_docs() == 10
find_results_after = store.find(query, search_field='tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# add new data
store.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(5)])
assert store.num_docs() == 15
def test_persist_and_restore_nested(tmp_path):
query = NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
# create index
store = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
store.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(10)
]
)
assert store.num_docs() == 10
find_results_before = store.find(query, search_field='d__tens', limit=5)
# delete and restore
del store
store = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
assert store.num_docs() == 10
find_results_after = store.find(query, search_field='d__tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# delete and restore
store.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(5)
]
)
assert store.num_docs() == 15
|
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDocument
from docarray.index import HnswDocumentIndex
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
class SimpleDoc(BaseDocument):
tens: NdArray[10] = Field(dim=1000)
class NestedDoc(BaseDocument):
d: SimpleDoc
tens: NdArray[50]
def test_persist_and_restore(tmp_path):
query = SimpleDoc(tens=np.random.random((10,)))
# create index
store = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
store.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(10)])
assert store.num_docs() == 10
find_results_before = store.find(query, search_field='tens', limit=5)
# delete and restore
del store
store = HnswDocumentIndex[SimpleDoc](work_dir=str(tmp_path))
assert store.num_docs() == 10
find_results_after = store.find(query, search_field='tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# add new data
store.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(5)])
assert store.num_docs() == 15
def test_persist_and_restore_nested(tmp_path):
query = NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
# create index
store = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
store.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(10)
]
)
assert store.num_docs() == 10
find_results_before = store.find(query, search_field='d__tens', limit=5)
# delete and restore
del store
store = HnswDocumentIndex[NestedDoc](work_dir=str(tmp_path))
assert store.num_docs() == 10
find_results_after = store.find(query, search_field='d__tens', limit=5)
for doc_before, doc_after in zip(find_results_before[0], find_results_after[0]):
assert doc_before.id == doc_after.id
assert (doc_before.tens == doc_after.tens).all()
# delete and restore
store.index(
[
NestedDoc(
tens=np.random.random((50,)), d=SimpleDoc(tens=np.random.random((10,)))
)
for _ in range(5)
]
)
assert store.num_docs() == 15
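# A minimal standalone sketch mirroring the tests above: build an index in a
# temporary directory and query it with `find`.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        index = HnswDocumentIndex[SimpleDoc](work_dir=tmp_dir)
        index.index([SimpleDoc(tens=np.random.random((10,))) for _ in range(10)])
        matches, scores = index.find(
            SimpleDoc(tens=np.random.random((10,))), search_field='tens', limit=3
        )
        print(len(matches), scores)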
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
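# Added note (not part of the original config): when the auto-scale feature is
# switched on, the optimizer lr is expected to be multiplied by
#   (actual total batch size) / base_batch_size,
# so 8 GPUs x 8 samples (64 in total) keeps lr = 0.08, while e.g.
# 4 GPUs x 8 samples (32 in total) would scale it to 0.08 * 32 / 64 = 0.04.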
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='Resize',
img_scale=(640, 640),
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=(640, 640)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=(640, 640)),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(640, 640),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=64),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=4,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.08,
momentum=0.9,
weight_decay=0.0001,
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.1,
step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
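# Illustrative usage sketch (added; not part of the original module). It assumes
# mmcv/mmdet are importable and uses made-up feature-map shapes purely to show
# the multi-level fusion described in the class docstring.
if __name__ == '__main__':
    import torch
    import mmdet.models  # noqa: F401  (assumed: populates the loss registry)
    # five input levels with 256 channels and decreasing resolution (made up)
    feats = [torch.randn(1, 256, 64 // 2 ** i, 64 // 2 ** i) for i in range(5)]
    head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
    # every level is resized to the fusion level's resolution, passed through a
    # 1x1 lateral conv and summed, then refined by the 3x3 convs
    mask_pred, semantic_feat = head(feats)
    print(mask_pred.shape)      # expected: torch.Size([1, 183, 32, 32])
    print(semantic_feat.shape)  # expected: torch.Size([1, 256, 32, 32])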
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
            warnings.warn('``ignore_label`` and ``loss_weight`` will be '
                          'deprecated soon. Please set ``ignore_index`` and '
                          '``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
from docarray.array.array.array import DocumentArray
from docarray.array.stacked.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
|
from docarray.array.array import DocumentArray
from docarray.array.array_stacked import DocumentArrayStacked
__all__ = ['DocumentArray', 'DocumentArrayStacked']
|
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing.plotting import run_categorical
try:
import matplotlib
matplotlib.use("Agg")
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(), tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train(
{"max_depth": 2, "eta": 1, "objective": "binary:logistic"},
m,
num_boost_round=2,
)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == "Feature importance"
assert ax.get_xlabel() == "Importance score"
assert ax.get_ylabel() == "Features"
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color="r", title="t", xlabel="x", ylabel="y")
assert isinstance(ax, Axes)
assert ax.get_title() == "t"
assert ax.get_xlabel() == "x"
assert ax.get_ylabel() == "y"
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(
booster, color=["r", "r", "b", "b"], title=None, xlabel=None, ylabel=None
)
assert isinstance(ax, Axes)
assert ax.get_title() == ""
assert ax.get_xlabel() == ""
assert ax.get_ylabel() == ""
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, tree_idx=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, tree_idx=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0.0, 11.0)
assert ax.get_ylim() == (-1.0, 71.0)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0.0, 5.0)
assert ax.get_ylim() == (10.0, 71.0)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
run_categorical("approx", "cpu")
|
import json
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
try:
import matplotlib
matplotlib.use('Agg')
from graphviz import Source
from matplotlib.axes import Axes
except ImportError:
pass
pytestmark = pytest.mark.skipif(**tm.no_multiple(tm.no_matplotlib(),
tm.no_graphviz()))
class TestPlotting:
def test_plotting(self):
m, _ = tm.load_agaricus(__file__)
booster = xgb.train({'max_depth': 2, 'eta': 1,
'objective': 'binary:logistic'}, m,
num_boost_round=2)
ax = xgb.plot_importance(booster)
assert isinstance(ax, Axes)
assert ax.get_title() == 'Feature importance'
assert ax.get_xlabel() == 'Importance score'
assert ax.get_ylabel() == 'Features'
assert len(ax.patches) == 4
ax = xgb.plot_importance(booster, color='r',
title='t', xlabel='x', ylabel='y')
assert isinstance(ax, Axes)
assert ax.get_title() == 't'
assert ax.get_xlabel() == 'x'
assert ax.get_ylabel() == 'y'
assert len(ax.patches) == 4
for p in ax.patches:
assert p.get_facecolor() == (1.0, 0, 0, 1.0) # red
ax = xgb.plot_importance(booster, color=['r', 'r', 'b', 'b'],
title=None, xlabel=None, ylabel=None)
assert isinstance(ax, Axes)
assert ax.get_title() == ''
assert ax.get_xlabel() == ''
assert ax.get_ylabel() == ''
assert len(ax.patches) == 4
assert ax.patches[0].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[1].get_facecolor() == (1.0, 0, 0, 1.0) # red
assert ax.patches[2].get_facecolor() == (0, 0, 1.0, 1.0) # blue
assert ax.patches[3].get_facecolor() == (0, 0, 1.0, 1.0) # blue
g = xgb.to_graphviz(booster, tree_idx=0)
assert isinstance(g, Source)
ax = xgb.plot_tree(booster, tree_idx=0)
assert isinstance(ax, Axes)
def test_importance_plot_lim(self):
np.random.seed(1)
dm = xgb.DMatrix(np.random.randn(100, 100), label=[0, 1] * 50)
bst = xgb.train({}, dm)
assert len(bst.get_fscore()) == 71
ax = xgb.plot_importance(bst)
assert ax.get_xlim() == (0., 11.)
assert ax.get_ylim() == (-1., 71.)
ax = xgb.plot_importance(bst, xlim=(0, 5), ylim=(10, 71))
assert ax.get_xlim() == (0., 5.)
assert ax.get_ylim() == (10., 71.)
def run_categorical(self, tree_method: str) -> None:
X, y = tm.make_categorical(1000, 31, 19, onehot=False)
reg = xgb.XGBRegressor(
enable_categorical=True, n_estimators=10, tree_method=tree_method
)
reg.fit(X, y)
trees = reg.get_booster().get_dump(dump_format="json")
for tree in trees:
j_tree = json.loads(tree)
assert "leaf" in j_tree.keys() or isinstance(
j_tree["split_condition"], list
)
graph = xgb.to_graphviz(reg, tree_idx=len(j_tree) - 1)
assert isinstance(graph, Source)
ax = xgb.plot_tree(reg, tree_idx=len(j_tree) - 1)
assert isinstance(ax, Axes)
@pytest.mark.skipif(**tm.no_pandas())
def test_categorical(self) -> None:
self.run_categorical("approx")
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .ocsort_tracker import OCSORTTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
from .strongsort_tracker import StrongSORTTracker
__all__ = [
'BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker',
'StrongSORTTracker', 'OCSORTTracker', 'MaskTrackRCNNTracker'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_tracker import BaseTracker
from .byte_tracker import ByteTracker
from .masktrack_rcnn_tracker import MaskTrackRCNNTracker
from .quasi_dense_tracker import QuasiDenseTracker
from .sort_tracker import SORTTracker
__all__ = [
'BaseTracker', 'ByteTracker', 'QuasiDenseTracker', 'SORTTracker',
'MaskTrackRCNNTracker'
]
|
# coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by default. Possibly the only difference
between them with default settings is that `binary` may achieve a slight
speed improvement by assuming that the labels are binary instead of
probabilistic.
"""
import time
import numpy as np
import pandas as pd
from scipy.special import expit
import lightgbm as lgb
#################
# Simulate some binary data with a single categorical and
# single continuous predictor
np.random.seed(0)
N = 1000
X = pd.DataFrame({"continuous": range(N), "categorical": np.repeat([0, 1, 2, 3, 4], N / 5)})
CATEGORICAL_EFFECTS = [-1, -1, -2, -2, 2]
LINEAR_TERM = np.array(
[-0.5 + 0.01 * X["continuous"][k] + CATEGORICAL_EFFECTS[X["categorical"][k]] for k in range(X.shape[0])]
) + np.random.normal(0, 1, X.shape[0])
TRUE_PROB = expit(LINEAR_TERM)
Y = np.random.binomial(1, TRUE_PROB, size=N)
DATA = {
"X": X,
"probability_labels": TRUE_PROB,
"binary_labels": Y,
"lgb_with_binary_labels": lgb.Dataset(X, Y),
"lgb_with_probability_labels": lgb.Dataset(X, TRUE_PROB),
}
#################
# Set up a couple of utilities for our experiments
def log_loss(preds, labels):
"""Logarithmic loss with non-necessarily-binary labels."""
log_likelihood = np.sum(labels * np.log(preds)) / len(preds)
return -log_likelihood
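# Added note: the helper above keeps only the -mean(y * log(p)) term of the
# binary cross-entropy; the full definition is usually written as
#   -mean(y * log(p) + (1 - y) * log(1 - p)).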
def experiment(objective, label_type, data):
"""Measure performance of an objective.
Parameters
----------
objective : {'binary', 'xentropy'}
Objective function.
label_type : {'binary', 'probability'}
Type of the label.
data : dict
Data for training.
Returns
-------
result : dict
Experiment summary stats.
"""
np.random.seed(0)
nrounds = 5
lgb_data = data[f"lgb_with_{label_type}_labels"]
params = {"objective": objective, "feature_fraction": 1, "bagging_fraction": 1, "verbose": -1}
time_zero = time.time()
gbm = lgb.train(params, lgb_data, num_boost_round=nrounds)
y_fitted = gbm.predict(data["X"])
y_true = data[f"{label_type}_labels"]
duration = time.time() - time_zero
return {"time": duration, "correlation": np.corrcoef(y_fitted, y_true)[0, 1], "logloss": log_loss(y_fitted, y_true)}
#################
# Observe the behavior of `binary` and `xentropy` objectives
print("Performance of `binary` objective with binary labels:")
print(experiment("binary", label_type="binary", data=DATA))
print("Performance of `xentropy` objective with binary labels:")
print(experiment("xentropy", label_type="binary", data=DATA))
print("Performance of `xentropy` objective with probability labels:")
print(experiment("xentropy", label_type="probability", data=DATA))
# Trying this throws an error on non-binary values of y:
# experiment('binary', label_type='probability', DATA)
# The speed of `binary` is not drastically different than
# `xentropy`. `xentropy` runs faster than `binary` in many cases, although
# there are reasons to suspect that `binary` should run faster when the
# label is an integer instead of a float
K = 10
A = [experiment("binary", label_type="binary", data=DATA)["time"] for k in range(K)]
B = [experiment("xentropy", label_type="binary", data=DATA)["time"] for k in range(K)]
print(f"Best `binary` time: {min(A)}")
print(f"Best `xentropy` time: {min(B)}")
|
# coding: utf-8
"""Comparison of `binary` and `xentropy` objectives.
BLUF: The `xentropy` objective does logistic regression and generalizes
to the case where labels are probabilistic (i.e. numbers between 0 and 1).
Details: Both `binary` and `xentropy` minimize the log loss and use
`boost_from_average = TRUE` by default. Possibly the only difference
between them with default settings is that `binary` may achieve a slight
speed improvement by assuming that the labels are binary instead of
probabilistic.
"""
import time
import numpy as np
import pandas as pd
from scipy.special import expit
import lightgbm as lgb
#################
# Simulate some binary data with a single categorical and
# single continuous predictor
np.random.seed(0)
N = 1000
X = pd.DataFrame({
'continuous': range(N),
'categorical': np.repeat([0, 1, 2, 3, 4], N / 5)
})
CATEGORICAL_EFFECTS = [-1, -1, -2, -2, 2]
LINEAR_TERM = np.array([
-0.5 + 0.01 * X['continuous'][k]
+ CATEGORICAL_EFFECTS[X['categorical'][k]] for k in range(X.shape[0])
]) + np.random.normal(0, 1, X.shape[0])
TRUE_PROB = expit(LINEAR_TERM)
Y = np.random.binomial(1, TRUE_PROB, size=N)
DATA = {
'X': X,
'probability_labels': TRUE_PROB,
'binary_labels': Y,
'lgb_with_binary_labels': lgb.Dataset(X, Y),
'lgb_with_probability_labels': lgb.Dataset(X, TRUE_PROB),
}
#################
# Set up a couple of utilities for our experiments
def log_loss(preds, labels):
"""Logarithmic loss with non-necessarily-binary labels."""
log_likelihood = np.sum(labels * np.log(preds)) / len(preds)
return -log_likelihood
def experiment(objective, label_type, data):
"""Measure performance of an objective.
Parameters
----------
objective : {'binary', 'xentropy'}
Objective function.
label_type : {'binary', 'probability'}
Type of the label.
data : dict
Data for training.
Returns
-------
result : dict
Experiment summary stats.
"""
np.random.seed(0)
nrounds = 5
lgb_data = data[f"lgb_with_{label_type}_labels"]
params = {
'objective': objective,
'feature_fraction': 1,
'bagging_fraction': 1,
'verbose': -1
}
time_zero = time.time()
gbm = lgb.train(params, lgb_data, num_boost_round=nrounds)
y_fitted = gbm.predict(data['X'])
y_true = data[f"{label_type}_labels"]
duration = time.time() - time_zero
return {
'time': duration,
'correlation': np.corrcoef(y_fitted, y_true)[0, 1],
'logloss': log_loss(y_fitted, y_true)
}
#################
# Observe the behavior of `binary` and `xentropy` objectives
print('Performance of `binary` objective with binary labels:')
print(experiment('binary', label_type='binary', data=DATA))
print('Performance of `xentropy` objective with binary labels:')
print(experiment('xentropy', label_type='binary', data=DATA))
print('Performance of `xentropy` objective with probability labels:')
print(experiment('xentropy', label_type='probability', data=DATA))
# Trying this throws an error on non-binary values of y:
# experiment('binary', label_type='probability', DATA)
# The speed of `binary` is not drastically different than
# `xentropy`. `xentropy` runs faster than `binary` in many cases, although
# there are reasons to suspect that `binary` should run faster when the
# label is an integer instead of a float
K = 10
A = [experiment('binary', label_type='binary', data=DATA)['time']
for k in range(K)]
B = [experiment('xentropy', label_type='binary', data=DATA)['time']
for k in range(K)]
print(f"Best `binary` time: {min(A)}")
print(f"Best `xentropy` time: {min(B)}")
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../cascade_rcnn/cascade-mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=[
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared4Conv1FCBBoxHead',
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
norm_cfg=norm_cfg,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.base_document import DocumentResponse
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/", response_model=Mmdoc, response_class=DocumentResponse)
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
doc = OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc, response_class=DocumentResponse)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
doc = OutputDoc.parse_raw(response.content.decode())
assert isinstance(doc, OutputDoc)
assert doc.embedding_clip.shape == (100, 1)
assert doc.embedding_bert.shape == (100, 1)
|
import numpy as np
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from docarray import BaseDocument
from docarray.documents import Image, Text
from docarray.typing import NdArray
@pytest.mark.asyncio
async def test_fast_api():
class Mmdoc(BaseDocument):
img: Image
text: Text
title: str
input_doc = Mmdoc(
img=Image(tensor=np.zeros((3, 224, 224))), text=Text(), title='hello'
)
app = FastAPI()
@app.post("/doc/")
async def create_item(doc: Mmdoc):
return doc
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_image():
class InputDoc(BaseDocument):
img: Image
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(img=Image(tensor=np.zeros((3, 224, 224))))
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
@pytest.mark.asyncio
async def test_sentence_to_embeddings():
class InputDoc(BaseDocument):
text: str
class OutputDoc(BaseDocument):
embedding_clip: NdArray
embedding_bert: NdArray
input_doc = InputDoc(text='hello')
app = FastAPI()
@app.post("/doc/", response_model=OutputDoc)
async def create_item(doc: InputDoc) -> OutputDoc:
## call my fancy model to generate the embeddings
return OutputDoc(
embedding_clip=np.zeros((100, 1)), embedding_bert=np.zeros((100, 1))
)
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post("/doc/", data=input_doc.json())
resp_doc = await ac.get("/docs")
resp_redoc = await ac.get("/redoc")
assert response.status_code == 200
assert resp_doc.status_code == 200
assert resp_redoc.status_code == 200
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
# use ResNeSt img_norm
data_preprocessor=dict(
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
bgr_to_rgb=True),
backbone=dict(
type='ResNeSt',
stem_channels=64,
depth=50,
radix=2,
reduction_factor=4,
avg_down_stride=True,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://resnest50')),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=norm_cfg),
mask_head=dict(norm_cfg=norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomChoiceResize',
scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocumentArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocumentArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocumentArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocumentArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
    def _get_stacked_fields(da: 'DocumentArrayStacked') -> List[str]:
        # TODO: this might be broken
"""
Return a list of the field names of a DocumentArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._storage.tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._storage.doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocumentArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
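# Illustrative example (added; the field names are assumed, not from this
# module): for a stacked array whose documents carry an `embedding` tensor and
# a nested `image` document with its own `tensor`, the helper above would
# return something like ['embedding', 'image.tensor'], and the summary can be
# printed with:
#
#     DocumentArraySummary(da).summary()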
|
from typing import TYPE_CHECKING, List
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array import DocumentArrayStacked
from docarray.array.abstract_array import AnyDocumentArray
class DocumentArraySummary:
def __init__(self, da: 'AnyDocumentArray'):
self.da = da
def summary(self) -> None:
"""
Print a summary of this DocumentArray object and a summary of the schema of its
Document type.
"""
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from docarray.array import DocumentArrayStacked
table = Table(box=box.SIMPLE, highlight=True)
table.show_header = False
table.add_row('Type', self.da.__class__.__name__)
table.add_row('Length', str(len(self.da)), end_section=True)
if isinstance(self.da, DocumentArrayStacked):
table.add_row('Stacked columns:')
stacked_fields = self._get_stacked_fields(da=self.da)
for field_name in stacked_fields:
val = self.da
for attr in field_name.split('.'):
val = getattr(val, attr)
if isinstance(val, AbstractTensor):
comp_be = val.get_comp_backend()
if comp_be.to_numpy(comp_be.isnan(val)).all():
col_2 = f'None ({val.__class__.__name__})'
else:
col_2 = (
f'{val.__class__.__name__} of shape {comp_be.shape(val)}'
f', dtype: {comp_be.dtype(val)}'
)
if comp_be.device(val):
col_2 += f', device: {comp_be.device(val)}'
table.add_row(f' • {field_name}:', col_2)
Console().print(Panel(table, title='DocumentArray Summary', expand=False))
self.da.document_type.schema_summary()
@staticmethod
def _get_stacked_fields(da: 'DocumentArrayStacked') -> List[str]:
"""
Return a list of the field names of a DocumentArrayStacked instance that are
stacked, i.e. all the fields that are of type AbstractTensor. Nested field
paths are separated by dot, such as: 'attr.nested_attr'.
"""
fields = []
for field_name, value_tens in da._tensor_columns.items():
fields.append(field_name)
for field_name, value_doc in da._doc_columns.items():
fields.extend(
[
f'{field_name}.{x}'
for x in DocumentArraySummary._get_stacked_fields(da=value_doc)
]
)
return fields
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList, BaseDoc
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
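# Added illustration (hypothetical inputs): _new_doc_from_data dispatches on the
# declared data type, e.g.
#   _new_doc_from_data({'text': 'hi'}, DataInputType.DICT)   -> Document built
#       from the dict (via ** or from_dict, depending on the docarray version)
#   _new_doc_from_data(existing_doc, DataInputType.AUTO)     -> passed through
#   _new_doc_from_data('raw text', DataInputType.CONTENT)    -> wrapped as
#       Document(content='raw text')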
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
if not docarray_v2:
da = DocumentArray([])
else:
if len(batch) > 0:
da = DocList[batch[0].__class__]()
else:
da = DocList[BaseDoc]()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.document_array_cls = da.__class__
req.data.docs = da
|
"""Module for helper functions for clients."""
from typing import Optional, Tuple
from jina._docarray import Document, DocumentArray, docarray_v2
from jina.enums import DataInputType
from jina.types.request.data import DataRequest
if docarray_v2:
from docarray import DocList
def _new_data_request_from_batch(
batch,
data_type: DataInputType,
endpoint: str,
target: Optional[str],
parameters: Optional[dict],
) -> DataRequest:
req = _new_data_request(endpoint, target, parameters)
# add docs fields
_add_docs(req, batch, data_type)
return req
def _new_data_request(
endpoint: str, target: Optional[str], parameters: Optional[dict]
) -> DataRequest:
req = DataRequest()
# set up header
req.header.exec_endpoint = endpoint
if target:
req.header.target_executor = target
# add parameters field
if parameters:
req.parameters = parameters
return req
def _new_doc_from_data(
data, data_type: DataInputType
) -> Tuple['Document', 'DataInputType']:
def _build_doc_from_content():
return Document(content=data), DataInputType.CONTENT
if data_type == DataInputType.DICT:
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
if data_type == DataInputType.AUTO or data_type == DataInputType.DOCUMENT:
if isinstance(data, Document):
# if incoming is already primitive type Document, then all good, best practice!
return data, DataInputType.DOCUMENT
elif isinstance(data, dict):
return (
(Document(**data), DataInputType.DICT)
if docarray_v2
else (Document.from_dict(data), DataInputType.DICT)
)
else:
try:
d = Document(data)
return d, DataInputType.DOCUMENT # NOT HIT
except ValueError:
# AUTO has a fallback, now reconsider it as content
if data_type == DataInputType.AUTO:
return _build_doc_from_content()
else:
raise
elif data_type == DataInputType.CONTENT:
return _build_doc_from_content()
def _add_docs(req: DataRequest, batch, data_type: DataInputType) -> None:
if not docarray_v2:
da = DocumentArray([])
else:
da = DocList[batch[0].__class__]()
for content in batch:
d, data_type = _new_doc_from_data(content, data_type)
da.append(d)
req.document_array_cls = da.__class__
req.data.docs = da
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, decode_webp, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
img_webp = read_image(str(SCRIPT_DIR / "assets/fakedata/logos/rgb_pytorch.webp"))
if img_webp.shape != (3, 100, 100):
raise RuntimeError(f"Unexpected shape of img_webp: {img_webp.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
print(f"{torch.ops.image._jpeg_version() = }")
if not torch.ops.image._is_compiled_against_turbo():
msg = "Torchvision wasn't compiled against libjpeg-turbo"
if os.getenv("IS_M1_CONDA_BUILD_JOB") == "1":
# When building the conda package on M1, it's difficult to enforce
# that we build against turbo due to interactions with the libwebp
# package. So we just accept it, instead of raising an error.
print(msg)
else:
raise ValueError(msg)
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights, progress=False).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import Datapoint
from ._image import Image
from ._mask import Mask
from ._video import Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
from torchvision import _BETA_TRANSFORMS_WARNING, _WARN_ABOUT_BETA_TRANSFORMS
from ._bounding_box import BoundingBoxes, BoundingBoxFormat
from ._datapoint import _FillType, _FillTypeJIT, _InputType, _InputTypeJIT, Datapoint
from ._image import _ImageType, _ImageTypeJIT, _TensorImageType, _TensorImageTypeJIT, Image
from ._mask import Mask
from ._video import _TensorVideoType, _TensorVideoTypeJIT, _VideoType, _VideoTypeJIT, Video
if _WARN_ABOUT_BETA_TRANSFORMS:
import warnings
warnings.warn(_BETA_TRANSFORMS_WARNING)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x92\x03\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x12\n\x08mesh_url\x18\x0c \x01(\tH\x00\x12\x19\n\x0fpoint_cloud_url\x18\r \x01(\tH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\";\n\x12\x44ocumentArrayProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xd6\x01\n\x19\x44ocumentArrayStackedProto\x12+\n\x05list_\x18\x01 \x01(\x0b\x32\x1c.docarray.DocumentArrayProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\x62\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 633
_DOCUMENTPROTO._serialized_start = 636
_DOCUMENTPROTO._serialized_end = 766
_DOCUMENTPROTO_DATAENTRY._serialized_start = 702
_DOCUMENTPROTO_DATAENTRY._serialized_end = 766
_DOCUMENTARRAYPROTO._serialized_start = 768
_DOCUMENTARRAYPROTO._serialized_end = 827
_UNIONARRAYPROTO._serialized_start = 830
_UNIONARRAYPROTO._serialized_end = 964
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 967
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1181
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1108
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1181
# @@protoc_insertion_point(module_scope)
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: docarray.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x0e\x64ocarray.proto\x12\x08\x64ocarray\x1a\x1cgoogle/protobuf/struct.proto\"A\n\x11\x44\x65nseNdArrayProto\x12\x0e\n\x06\x62uffer\x18\x01 \x01(\x0c\x12\r\n\x05shape\x18\x02 \x03(\r\x12\r\n\x05\x64type\x18\x03 \x01(\t\"g\n\x0cNdArrayProto\x12*\n\x05\x64\x65nse\x18\x01 \x01(\x0b\x32\x1b.docarray.DenseNdArrayProto\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\"\x92\x03\n\tNodeProto\x12\x0e\n\x04\x62lob\x18\x01 \x01(\x0cH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x0e\n\x04text\x18\x03 \x01(\tH\x00\x12)\n\x06nested\x18\x04 \x01(\x0b\x32\x17.docarray.DocumentProtoH\x00\x12.\n\x06\x63hunks\x18\x05 \x01(\x0b\x32\x1c.docarray.DocumentArrayProtoH\x00\x12+\n\tembedding\x18\x06 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x11\n\x07\x61ny_url\x18\x07 \x01(\tH\x00\x12\x13\n\timage_url\x18\x08 \x01(\tH\x00\x12\x12\n\x08text_url\x18\t \x01(\tH\x00\x12\x0c\n\x02id\x18\n \x01(\tH\x00\x12.\n\x0ctorch_tensor\x18\x0b \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x12\x12\n\x08mesh_url\x18\x0c \x01(\tH\x00\x12\x19\n\x0fpoint_cloud_url\x18\r \x01(\tH\x00\x42\t\n\x07\x63ontent\"\x82\x01\n\rDocumentProto\x12/\n\x04\x64\x61ta\x18\x01 \x03(\x0b\x32!.docarray.DocumentProto.DataEntry\x1a@\n\tDataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.docarray.NodeProto:\x02\x38\x01\"?\n\x16\x44ocumentArrayListProto\x12%\n\x04\x64ocs\x18\x01 \x03(\x0b\x32\x17.docarray.DocumentProto\"\x86\x01\n\x0fUnionArrayProto\x12=\n\x0e\x64ocument_array\x18\x01 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x12)\n\x07ndarray\x18\x02 \x01(\x0b\x32\x16.docarray.NdArrayProtoH\x00\x42\t\n\x07\x63ontent\"\xda\x01\n\x19\x44ocumentArrayStackedProto\x12/\n\x05list_\x18\x01 \x01(\x0b\x32 .docarray.DocumentArrayListProto\x12\x41\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x30.docarray.DocumentArrayStackedProto.ColumnsEntry\x1aI\n\x0c\x43olumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.docarray.UnionArrayProto:\x02\x38\x01\"\x88\x01\n\x12\x44ocumentArrayProto\x12\x31\n\x05list_\x18\x01 \x01(\x0b\x32 .docarray.DocumentArrayListProtoH\x00\x12\x34\n\x05stack\x18\x02 \x01(\x0b\x32#.docarray.DocumentArrayStackedProtoH\x00\x42\t\n\x07\x63ontentb\x06proto3'
)
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'docarray_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_DOCUMENTPROTO_DATAENTRY._options = None
_DOCUMENTPROTO_DATAENTRY._serialized_options = b'8\001'
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._options = None
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_options = b'8\001'
_DENSENDARRAYPROTO._serialized_start = 58
_DENSENDARRAYPROTO._serialized_end = 123
_NDARRAYPROTO._serialized_start = 125
_NDARRAYPROTO._serialized_end = 228
_NODEPROTO._serialized_start = 231
_NODEPROTO._serialized_end = 633
_DOCUMENTPROTO._serialized_start = 636
_DOCUMENTPROTO._serialized_end = 766
_DOCUMENTPROTO_DATAENTRY._serialized_start = 702
_DOCUMENTPROTO_DATAENTRY._serialized_end = 766
_DOCUMENTARRAYLISTPROTO._serialized_start = 768
_DOCUMENTARRAYLISTPROTO._serialized_end = 831
_UNIONARRAYPROTO._serialized_start = 834
_UNIONARRAYPROTO._serialized_end = 968
_DOCUMENTARRAYSTACKEDPROTO._serialized_start = 971
_DOCUMENTARRAYSTACKEDPROTO._serialized_end = 1189
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_start = 1116
_DOCUMENTARRAYSTACKEDPROTO_COLUMNSENTRY._serialized_end = 1189
_DOCUMENTARRAYPROTO._serialized_start = 1192
_DOCUMENTARRAYPROTO._serialized_end = 1328
# @@protoc_insertion_point(module_scope)
|
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina.constants import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
    It uses the Hub API to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
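# Illustrative behaviour (the Hub reference below is made up, not from this file):
#   get_image_name('jinahub+docker://MyExecutor') -> image name resolved from Hub metadata
#   get_image_name('docker://my-registry/my-image:latest') -> 'my-registry/my-image:latest'
# A plain 'docker://...' reference falls back to the raw image name with the prefix stripped.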
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
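# Example (follows directly from the implementation above): '/' and '_' become '-'
# and the result is lower-cased, e.g. to_compatible_name('executor_0/MyExec') -> 'executor-0-myexec'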
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
from jina import __version__
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
except:
return 'master'
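# Illustrative behaviour: when Docker Hub lists a tag matching the installed jina
# __version__, that version is returned and used as the base image tag; otherwise
# (including on any network error) the fallback tag 'master' is used.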
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
    Convert a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
'env_from_secret',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
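# Illustrative result (argument values are made up): for a worker pod the returned list
# looks roughly like
#   ['executor', '--name', 'my-exec', '--uses-metas', '{...}', '--uses-with', '{...}', '--native']
# i.e. the non-default pod arguments, then the serialized overrides, then '--native'.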
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
    :return: boolean indicating whether it is a valid uses argument for K8s or Docker Compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_composite_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
return is_valid_docker_uri(uses)
except ValueError:
return False
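# Illustrative checks: None, the default gateway/executor class names and any
# 'docker://...' image reference are accepted directly; every other string is
# delegated to is_valid_docker_uri().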
|
import os
from typing import Dict
from hubble.executor.helper import is_valid_docker_uri, parse_hub_uri
from hubble.executor.hubio import HubIO
from jina.constants import (
__default_composite_gateway__,
__default_executor__,
__default_grpc_gateway__,
__default_http_gateway__,
__default_websocket_gateway__,
)
from jina.enums import PodRoleType
def get_image_name(uses: str) -> str:
"""The image can be provided in different formats by the user.
This function converts it to an image name which can be understood by k8s.
    It uses the Hub API to get the image name and the latest tag on Docker Hub.
    If you don't want to rebuild the image on Jina Hub,
    you can set the `JINA_HUB_NO_IMAGE_REBUILD` environment variable.
:param uses: image name
:return: normalized image name
"""
try:
rebuild_image = 'JINA_HUB_NO_IMAGE_REBUILD' not in os.environ
scheme, name, tag, secret = parse_hub_uri(uses)
meta_data, _ = HubIO.fetch_meta(
name, tag, secret=secret, rebuild_image=rebuild_image, force=True
)
image_name = meta_data.image_name
return image_name
except Exception:
if uses.startswith('docker'):
# docker:// is a valid requirement and user may want to put its own image
return uses.replace('docker://', '')
raise
def to_compatible_name(name: str) -> str:
"""Converts the deployment name to a valid name for K8s and docker compose.
:param name: name of the deployment
:return: compatible name
"""
return name.replace('/', '-').replace('_', '-').lower()
def get_base_executor_version():
"""
Get the version of jina to be used
:return: the version tag
"""
import requests
try:
from jina import __version__
url = 'https://registry.hub.docker.com/v2/repositories/jinaai/jina/tags'
result: Dict = requests.get(url, params={'name': __version__}).json()
if result.get('count', 0) > 0:
return __version__
else:
return 'master'
except:
return 'master'
def construct_runtime_container_args(cargs, uses_metas, uses_with, pod_type):
"""
    Convert a set of Namespace arguments into a list of arguments to pass to a container entrypoint
:param cargs: The namespace arguments
:param uses_metas: The uses_metas to override
:param uses_with: The uses_with to override
:param pod_type: The pod_type
:return: Arguments to pass to container
"""
import json
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
taboo = {
'uses_with',
'uses_metas',
'volumes',
'uses_before',
'uses_after',
'workspace_id',
'noblock_on_start',
'env',
}
if pod_type == PodRoleType.HEAD:
taboo.add('uses')
taboo.add('workspace')
if pod_type in {PodRoleType.WORKER, PodRoleType.GATEWAY}:
taboo.add('polling')
non_defaults = ArgNamespace.get_non_defaults_args(
cargs,
set_pod_parser(),
taboo=taboo,
)
_args = ArgNamespace.kwargs2list(non_defaults)
container_args = ['executor'] + _args
if uses_metas is not None:
container_args.extend(['--uses-metas', json.dumps(uses_metas)])
if uses_with is not None:
container_args.extend(['--uses-with', json.dumps(uses_with)])
container_args.append('--native')
return container_args
def validate_uses(uses: str):
"""Validate uses argument
:param uses: uses argument
    :return: boolean indicating whether it is a valid uses argument for K8s or Docker Compose
"""
# Uses can be either None (not specified), default gateway class, default executor or docker image
    # None => deployment uses base container image and uses is determined inside container
# default gateway class or default executor => deployment uses base container and sets uses in command
# container images => deployment uses the specified container image and uses is defined by container
if (
uses is None
or uses
in [
__default_http_gateway__,
__default_websocket_gateway__,
__default_grpc_gateway__,
__default_composite_gateway__,
__default_executor__,
]
or uses.startswith('docker://')
):
return True
try:
return is_valid_docker_uri(uses)
except ValueError:
return False
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDoc
from docarray.documents import PointCloud3D
from docarray.utils._internal.misc import is_tf_available
from docarray.utils._internal.pydantic import is_pydantic_v2
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensors = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensors.points, np.ndarray)
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_point_cloud_np():
pc = parse_obj_as(PointCloud3D, np.zeros((10, 3)))
assert (pc.tensors.points == np.zeros((10, 3))).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_point_cloud_torch():
pc = parse_obj_as(PointCloud3D, torch.zeros(10, 3))
assert (pc.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.tensorflow
def test_point_cloud_tensorflow():
pc = parse_obj_as(PointCloud3D, tf.zeros((10, 3)))
assert tnp.allclose(pc.tensors.points.tensor, tf.zeros((10, 3)))
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
pc3: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=np.zeros((10, 3)),
pc3=torch.zeros(10, 3),
)
assert doc.pc.url == 'http://myurl.ply'
assert (doc.pc2.tensors.points == np.zeros((10, 3))).all()
assert (doc.pc3.tensors.points == torch.zeros(10, 3)).all()
@pytest.mark.skipif(is_pydantic_v2, reason="Not working with pydantic v2 for now")
@pytest.mark.tensorflow
def test_point_cloud_shortcut_doc_tf():
class MyDoc(BaseDoc):
pc: PointCloud3D
pc2: PointCloud3D
doc = MyDoc(
pc='http://myurl.ply',
pc2=tf.zeros((10, 3)),
)
assert doc.pc.url == 'http://myurl.ply'
assert tnp.allclose(doc.pc2.tensors.points.tensor, tf.zeros((10, 3)))
|
"""Test IPEX LLM"""
import os
from typing import Any
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import IpexLLM
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_IDS environment variable not set."
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore[assignment]
device = os.getenv("TEST_IPEXLLM_MODEL_DEVICE") or "cpu"
def load_model(model_id: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
model_kwargs={
"temperature": 0,
"max_length": 16,
"trust_remote_code": True,
"device": device,
},
)
return llm
def load_model_more_types(model_id: str, load_in_low_bit: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
load_in_low_bit=load_in_low_bit,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
return llm
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_call(model_id: str) -> None:
"""Test valid call."""
llm = load_model(model_id)
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_asym_int4(model_id: str) -> None:
"""Test asym int4 data type."""
llm = load_model_more_types(model_id=model_id, load_in_low_bit="asym_int4")
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_generate(model_id: str) -> None:
"""Test valid generate."""
llm = load_model(model_id)
output = llm.generate(["Hello!"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_save_load_lowbit(model_id: str) -> None:
"""Test save and load lowbit model."""
saved_lowbit_path = "/tmp/saved_model"
llm = load_model(model_id)
llm.model.save_low_bit(saved_lowbit_path)
del llm
loaded_llm = IpexLLM.from_model_id_low_bit(
model_id=saved_lowbit_path,
tokenizer_id=model_id,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
output = loaded_llm.invoke("Hello!")
assert isinstance(output, str)
|
"""Test IPEX LLM"""
import os
from typing import Any
import pytest
from langchain_core.outputs import LLMResult
from langchain_community.llms import IpexLLM
model_ids_to_test = os.getenv("TEST_IPEXLLM_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test, reason="TEST_IPEXLLM_MODEL_IDS environment variable not set."
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore
device = os.getenv("TEST_IPEXLLM_MODEL_DEVICE") or "cpu"
def load_model(model_id: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
model_kwargs={
"temperature": 0,
"max_length": 16,
"trust_remote_code": True,
"device": device,
},
)
return llm
def load_model_more_types(model_id: str, load_in_low_bit: str) -> Any:
llm = IpexLLM.from_model_id(
model_id=model_id,
load_in_low_bit=load_in_low_bit,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
return llm
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_call(model_id: str) -> None:
"""Test valid call."""
llm = load_model(model_id)
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_asym_int4(model_id: str) -> None:
"""Test asym int4 data type."""
llm = load_model_more_types(model_id=model_id, load_in_low_bit="asym_int4")
output = llm.invoke("Hello!")
assert isinstance(output, str)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_generate(model_id: str) -> None:
"""Test valid generate."""
llm = load_model(model_id)
output = llm.generate(["Hello!"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_save_load_lowbit(model_id: str) -> None:
"""Test save and load lowbit model."""
saved_lowbit_path = "/tmp/saved_model"
llm = load_model(model_id)
llm.model.save_low_bit(saved_lowbit_path)
del llm
loaded_llm = IpexLLM.from_model_id_low_bit(
model_id=saved_lowbit_path,
tokenizer_id=model_id,
model_kwargs={"temperature": 0, "max_length": 16, "trust_remote_code": True},
)
output = loaded_llm.invoke("Hello!")
assert isinstance(output, str)
|
from __future__ import annotations
from collections.abc import Sequence
from copy import deepcopy
from typing import Any, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> list[dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n: The number of results to return. If None, returns all results.
                Defaults to self.top_n.
            max_chunks_per_doc: The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
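    # Illustrative output of `rerank` (scores are made up): a list of dicts such as
    #   [{"index": 2, "relevance_score": 0.97}, {"index": 0, "relevance_score": 0.35}]
    # where "index" refers back to the position in the `documents` argument.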
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
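# Minimal usage sketch (API key and documents are hypothetical):
#   reranker = CohereRerank(cohere_api_key="...", top_n=2)
#   ranked = reranker.compress_documents(
#       [Document(page_content="doc a"), Document(page_content="doc b")], query="my query"
#   )
# Each returned Document carries a "relevance_score" entry in its metadata.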
|
from __future__ import annotations
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import ConfigDict, model_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
@deprecated(
since="0.0.30", removal="1.0", alternative_import="langchain_cohere.CohereRerank"
)
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Any = None
"""Cohere client to use for compressing documents."""
top_n: Optional[int] = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
"""Cohere API key. Must be specified directly or via environment variable
COHERE_API_KEY."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@model_validator(mode="before")
@classmethod
def validate_environment(cls, values: Dict) -> Any:
"""Validate that api key and python package exists in environment."""
if not values.get("client"):
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values.get("user_agent", "langchain")
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
return values
def rerank(
self,
documents: Sequence[Union[str, Document, dict]],
query: str,
*,
model: Optional[str] = None,
top_n: Optional[int] = -1,
max_chunks_per_doc: Optional[int] = None,
) -> List[Dict[str, Any]]:
"""Returns an ordered list of documents ordered by their relevance to the provided query.
Args:
query: The query to use for reranking.
documents: A sequence of documents to rerank.
            model: The model to use for re-ranking. Defaults to self.model.
            top_n: The number of results to return. If None, returns all results.
                Defaults to self.top_n.
            max_chunks_per_doc: The maximum number of chunks derived from a document.
""" # noqa: E501
if len(documents) == 0: # to avoid empty api call
return []
docs = [
doc.page_content if isinstance(doc, Document) else doc for doc in documents
]
model = model or self.model
top_n = top_n if (top_n is None or top_n > 0) else self.top_n
results = self.client.rerank(
query=query,
documents=docs,
model=model,
top_n=top_n,
max_chunks_per_doc=max_chunks_per_doc,
)
if hasattr(results, "results"):
results = getattr(results, "results")
result_dicts = []
for res in results:
result_dicts.append(
{
"index": res.index,
"relevance_score": res.relevance_score,
}
)
return result_dicts
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
compressed = []
for res in self.rerank(documents, query):
doc = documents[res["index"]]
doc_copy = Document(doc.page_content, metadata=deepcopy(doc.metadata))
doc_copy.metadata["relevance_score"] = res["relevance_score"]
compressed.append(doc_copy)
return compressed
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.densenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.densenet import preprocess_input as preprocess_input
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121
from keras.src.applications.densenet import DenseNet169
from keras.src.applications.densenet import DenseNet201
from keras.src.applications.densenet import decode_predictions
from keras.src.applications.densenet import preprocess_input
|
from keras.src.backend.tensorflow import core
from keras.src.backend.tensorflow import distribution_lib
from keras.src.backend.tensorflow import image
from keras.src.backend.tensorflow import linalg
from keras.src.backend.tensorflow import math
from keras.src.backend.tensorflow import nn
from keras.src.backend.tensorflow import numpy
from keras.src.backend.tensorflow import random
from keras.src.backend.tensorflow import tensorboard
from keras.src.backend.tensorflow.core import IS_THREAD_SAFE
from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.tensorflow.core import Variable
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import compute_output_spec
from keras.src.backend.tensorflow.core import cond
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.backend.tensorflow.core import convert_to_tensor
from keras.src.backend.tensorflow.core import device_scope
from keras.src.backend.tensorflow.core import is_tensor
from keras.src.backend.tensorflow.core import name_scope
from keras.src.backend.tensorflow.core import random_seed_dtype
from keras.src.backend.tensorflow.core import scatter
from keras.src.backend.tensorflow.core import shape
from keras.src.backend.tensorflow.core import stop_gradient
from keras.src.backend.tensorflow.core import vectorized_map
from keras.src.backend.tensorflow.rnn import cudnn_ok
from keras.src.backend.tensorflow.rnn import gru
from keras.src.backend.tensorflow.rnn import lstm
from keras.src.backend.tensorflow.rnn import rnn
|
from keras.src.backend.tensorflow import core
from keras.src.backend.tensorflow import distribution_lib
from keras.src.backend.tensorflow import image
from keras.src.backend.tensorflow import linalg
from keras.src.backend.tensorflow import math
from keras.src.backend.tensorflow import nn
from keras.src.backend.tensorflow import numpy
from keras.src.backend.tensorflow import random
from keras.src.backend.tensorflow import tensorboard
from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.tensorflow.core import Variable
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import compute_output_spec
from keras.src.backend.tensorflow.core import cond
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.backend.tensorflow.core import convert_to_tensor
from keras.src.backend.tensorflow.core import device_scope
from keras.src.backend.tensorflow.core import is_tensor
from keras.src.backend.tensorflow.core import name_scope
from keras.src.backend.tensorflow.core import random_seed_dtype
from keras.src.backend.tensorflow.core import scatter
from keras.src.backend.tensorflow.core import shape
from keras.src.backend.tensorflow.core import stop_gradient
from keras.src.backend.tensorflow.core import vectorized_map
from keras.src.backend.tensorflow.rnn import cudnn_ok
from keras.src.backend.tensorflow.rnn import gru
from keras.src.backend.tensorflow.rnn import lstm
from keras.src.backend.tensorflow.rnn import rnn
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.12',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.12',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
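# Illustrative install commands for the extras declared above (standard pip extras syntax):
#   pip install docarray                 # core install
#   pip install "docarray[common]"       # adds protobuf, requests, Pillow, fastapi, ...
#   pip install "docarray[full,qdrant]"  # extras can be combined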
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0', 'jina-hubble-sdk>=0.13.1'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.10',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'redis': [
'redis>=4.3.0',
],
'benchmark': [
'pandas',
'seaborn',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.10',
'elasticsearch>=8.2.0',
'redis>=4.3.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
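# Illustrative round trip (the policy name is just an example): `serialize` produces a
# config dict that `deserialize` turns back into an equivalent policy, e.g.
#   config = serialize(FloatDTypePolicy("mixed_float16"))
#   policy = deserialize(config)  # FloatDTypePolicy named "mixed_float16"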
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
    You can also specify the `config` of the dtype policy to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to a `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(identifier, DTypePolicy):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.dtype_policies import dtype_policy
from keras.src.dtype_policies.dtype_policy import QUANTIZATION_MODES
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedDTypePolicy
from keras.src.dtype_policies.dtype_policy import QuantizedFloat8DTypePolicy
ALL_OBJECTS = {
DTypePolicy,
FloatDTypePolicy,
QuantizedDTypePolicy,
QuantizedFloat8DTypePolicy,
}
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
@keras_export("keras.dtype_policies.serialize")
def serialize(dtype_policy):
"""Serializes `DTypePolicy` instance.
Args:
dtype_policy: A Keras `DTypePolicy` instance.
Returns:
`DTypePolicy` configuration dictionary.
"""
from keras.src.saving import serialization_lib
return serialization_lib.serialize_keras_object(dtype_policy)
@keras_export("keras.dtype_policies.deserialize")
def deserialize(config, custom_objects=None):
"""Deserializes a serialized `DTypePolicy` instance.
Args:
config: `DTypePolicy` configuration.
custom_objects: Optional dictionary mapping names (strings) to custom
objects (classes and functions) to be considered during
deserialization.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.saving import serialization_lib
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.dtype_policies.get")
def get(identifier):
"""Retrieves a Keras `DTypePolicy` instance.
The `identifier` may be the string name of a `DTypePolicy` class.
>>> policy = dtype_policies.get("mixed_bfloat16")
>>> type(policy)
<class '...FloatDTypePolicy'>
    You can also specify the `config` of the dtype policy to this function by
    passing a dict containing `class_name` and `config` as an identifier. Also
    note that the `class_name` must map to a `DTypePolicy` class.
>>> identifier = {"class_name": "FloatDTypePolicy",
... "config": {"name": "float32"}}
>>> policy = dtype_policies.get(identifier)
>>> type(policy)
<class '...FloatDTypePolicy'>
Args:
identifier: A dtype policy identifier. One of `None` or string name of a
`DTypePolicy` or `DTypePolicy` configuration dictionary or a
`DTypePolicy` instance.
Returns:
A Keras `DTypePolicy` instance.
"""
from keras.src.dtype_policies.dtype_policy import (
_get_quantized_dtype_policy_by_str,
)
if identifier is None:
return dtype_policy.dtype_policy()
if isinstance(
identifier, (DTypePolicy, FloatDTypePolicy, QuantizedDTypePolicy)
):
return identifier
if isinstance(identifier, dict):
return deserialize(identifier)
if isinstance(identifier, str):
if identifier.startswith(QUANTIZATION_MODES):
return _get_quantized_dtype_policy_by_str(identifier)
else:
return FloatDTypePolicy(identifier)
try:
return FloatDTypePolicy(backend.standardize_dtype(identifier))
except:
raise ValueError(
"Cannot interpret `dtype` argument. Expected a string "
f"or an instance of DTypePolicy. Received: dtype={identifier}"
)
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
convert_image_dtype,
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from torchvision.transforms import InterpolationMode # usort: skip
from ._utils import is_simple_tensor # usort: skip
from ._meta import (
clamp_bounding_box,
convert_format_bounding_box,
convert_image_dtype,
to_dtype,
to_dtype_image_tensor,
to_dtype_video,
get_dimensions_image_tensor,
get_dimensions_image_pil,
get_dimensions,
get_num_frames_video,
get_num_frames,
get_image_num_channels,
get_num_channels_image_tensor,
get_num_channels_image_pil,
get_num_channels_video,
get_num_channels,
get_spatial_size_bounding_box,
get_spatial_size_image_tensor,
get_spatial_size_image_pil,
get_spatial_size_mask,
get_spatial_size_video,
get_spatial_size,
) # usort: skip
from ._augment import erase, erase_image_pil, erase_image_tensor, erase_video
from ._color import (
adjust_brightness,
adjust_brightness_image_pil,
adjust_brightness_image_tensor,
adjust_brightness_video,
adjust_contrast,
adjust_contrast_image_pil,
adjust_contrast_image_tensor,
adjust_contrast_video,
adjust_gamma,
adjust_gamma_image_pil,
adjust_gamma_image_tensor,
adjust_gamma_video,
adjust_hue,
adjust_hue_image_pil,
adjust_hue_image_tensor,
adjust_hue_video,
adjust_saturation,
adjust_saturation_image_pil,
adjust_saturation_image_tensor,
adjust_saturation_video,
adjust_sharpness,
adjust_sharpness_image_pil,
adjust_sharpness_image_tensor,
adjust_sharpness_video,
autocontrast,
autocontrast_image_pil,
autocontrast_image_tensor,
autocontrast_video,
equalize,
equalize_image_pil,
equalize_image_tensor,
equalize_video,
invert,
invert_image_pil,
invert_image_tensor,
invert_video,
posterize,
posterize_image_pil,
posterize_image_tensor,
posterize_video,
rgb_to_grayscale,
rgb_to_grayscale_image_pil,
rgb_to_grayscale_image_tensor,
solarize,
solarize_image_pil,
solarize_image_tensor,
solarize_video,
to_grayscale,
)
from ._geometry import (
affine,
affine_bounding_box,
affine_image_pil,
affine_image_tensor,
affine_mask,
affine_video,
center_crop,
center_crop_bounding_box,
center_crop_image_pil,
center_crop_image_tensor,
center_crop_mask,
center_crop_video,
crop,
crop_bounding_box,
crop_image_pil,
crop_image_tensor,
crop_mask,
crop_video,
elastic,
elastic_bounding_box,
elastic_image_pil,
elastic_image_tensor,
elastic_mask,
elastic_transform,
elastic_video,
five_crop,
five_crop_image_pil,
five_crop_image_tensor,
five_crop_video,
hflip, # TODO: Consider moving all pure alias definitions at the bottom of the file
horizontal_flip,
horizontal_flip_bounding_box,
horizontal_flip_image_pil,
horizontal_flip_image_tensor,
horizontal_flip_mask,
horizontal_flip_video,
pad,
pad_bounding_box,
pad_image_pil,
pad_image_tensor,
pad_mask,
pad_video,
perspective,
perspective_bounding_box,
perspective_image_pil,
perspective_image_tensor,
perspective_mask,
perspective_video,
resize,
resize_bounding_box,
resize_image_pil,
resize_image_tensor,
resize_mask,
resize_video,
resized_crop,
resized_crop_bounding_box,
resized_crop_image_pil,
resized_crop_image_tensor,
resized_crop_mask,
resized_crop_video,
rotate,
rotate_bounding_box,
rotate_image_pil,
rotate_image_tensor,
rotate_mask,
rotate_video,
ten_crop,
ten_crop_image_pil,
ten_crop_image_tensor,
ten_crop_video,
vertical_flip,
vertical_flip_bounding_box,
vertical_flip_image_pil,
vertical_flip_image_tensor,
vertical_flip_mask,
vertical_flip_video,
vflip,
)
from ._misc import (
gaussian_blur,
gaussian_blur_image_pil,
gaussian_blur_image_tensor,
gaussian_blur_video,
normalize,
normalize_image_tensor,
normalize_video,
)
from ._temporal import uniform_temporal_subsample, uniform_temporal_subsample_video
from ._type_conversion import pil_to_tensor, to_image_pil, to_image_tensor, to_pil_image
from ._deprecated import get_image_size, to_tensor # usort: skip
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.github.toolkit import (
BranchName,
CommentOnIssue,
CreateFile,
CreatePR,
CreateReviewRequest,
DeleteFile,
DirectoryPath,
GetIssue,
GetPR,
GitHubToolkit,
NoInput,
ReadFile,
SearchCode,
SearchIssuesAndPRs,
UpdateFile,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NoInput": "langchain_community.agent_toolkits.github.toolkit",
"GetIssue": "langchain_community.agent_toolkits.github.toolkit",
"CommentOnIssue": "langchain_community.agent_toolkits.github.toolkit",
"GetPR": "langchain_community.agent_toolkits.github.toolkit",
"CreatePR": "langchain_community.agent_toolkits.github.toolkit",
"CreateFile": "langchain_community.agent_toolkits.github.toolkit",
"ReadFile": "langchain_community.agent_toolkits.github.toolkit",
"UpdateFile": "langchain_community.agent_toolkits.github.toolkit",
"DeleteFile": "langchain_community.agent_toolkits.github.toolkit",
"DirectoryPath": "langchain_community.agent_toolkits.github.toolkit",
"BranchName": "langchain_community.agent_toolkits.github.toolkit",
"SearchCode": "langchain_community.agent_toolkits.github.toolkit",
"CreateReviewRequest": "langchain_community.agent_toolkits.github.toolkit",
"SearchIssuesAndPRs": "langchain_community.agent_toolkits.github.toolkit",
"GitHubToolkit": "langchain_community.agent_toolkits.github.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BranchName",
"CommentOnIssue",
"CreateFile",
"CreatePR",
"CreateReviewRequest",
"DeleteFile",
"DirectoryPath",
"GetIssue",
"GetPR",
"GitHubToolkit",
"NoInput",
"ReadFile",
"SearchCode",
"SearchIssuesAndPRs",
"UpdateFile",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.github.toolkit import (
BranchName,
CommentOnIssue,
CreateFile,
CreatePR,
CreateReviewRequest,
DeleteFile,
DirectoryPath,
GetIssue,
GetPR,
GitHubToolkit,
NoInput,
ReadFile,
SearchCode,
SearchIssuesAndPRs,
UpdateFile,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NoInput": "langchain_community.agent_toolkits.github.toolkit",
"GetIssue": "langchain_community.agent_toolkits.github.toolkit",
"CommentOnIssue": "langchain_community.agent_toolkits.github.toolkit",
"GetPR": "langchain_community.agent_toolkits.github.toolkit",
"CreatePR": "langchain_community.agent_toolkits.github.toolkit",
"CreateFile": "langchain_community.agent_toolkits.github.toolkit",
"ReadFile": "langchain_community.agent_toolkits.github.toolkit",
"UpdateFile": "langchain_community.agent_toolkits.github.toolkit",
"DeleteFile": "langchain_community.agent_toolkits.github.toolkit",
"DirectoryPath": "langchain_community.agent_toolkits.github.toolkit",
"BranchName": "langchain_community.agent_toolkits.github.toolkit",
"SearchCode": "langchain_community.agent_toolkits.github.toolkit",
"CreateReviewRequest": "langchain_community.agent_toolkits.github.toolkit",
"SearchIssuesAndPRs": "langchain_community.agent_toolkits.github.toolkit",
"GitHubToolkit": "langchain_community.agent_toolkits.github.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NoInput",
"GetIssue",
"CommentOnIssue",
"GetPR",
"CreatePR",
"CreateFile",
"ReadFile",
"UpdateFile",
"DeleteFile",
"DirectoryPath",
"BranchName",
"SearchCode",
"CreateReviewRequest",
"SearchIssuesAndPRs",
"GitHubToolkit",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MomentoChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MomentoChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MomentoChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import MomentoChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MomentoChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MomentoChatMessageHistory",
]
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Post-process embeddings from VGGish."""
import numpy as np
from .vggish_params import *
class Postprocessor(object):
"""Post-processes VGGish embeddings.
The initial release of AudioSet included 128-D VGGish embeddings for each
segment of AudioSet. These released embeddings were produced by applying
a PCA transformation (technically, a whitening transform is included as well)
and 8-bit quantization to the raw embedding output from VGGish, in order to
stay compatible with the YouTube-8M project which provides visual embeddings
in the same format for a large set of YouTube videos. This class implements
the same PCA (with whitening) and quantization transformations.
"""
def __init__(self, pca_params_npz_path):
"""Constructs a postprocessor.
Args:
pca_params_npz_path: Path to a NumPy-format .npz file that
contains the PCA parameters used in postprocessing.
"""
params = np.load(pca_params_npz_path)
self._pca_matrix = params[PCA_EIGEN_VECTORS_NAME]
# Load means into a column vector for easier broadcasting later.
self._pca_means = params[PCA_MEANS_NAME].reshape(-1, 1)
assert self._pca_matrix.shape == (
EMBEDDING_SIZE, EMBEDDING_SIZE), (
'Bad PCA matrix shape: %r' % (self._pca_matrix.shape,))
assert self._pca_means.shape == (EMBEDDING_SIZE, 1), (
'Bad PCA means shape: %r' % (self._pca_means.shape,))
def postprocess(self, embeddings_batch):
"""Applies postprocessing to a batch of embeddings.
Args:
embeddings_batch: An nparray of shape [batch_size, embedding_size]
containing output from the embedding layer of VGGish.
Returns:
An nparray of the same shape as the input but of type uint8,
containing the PCA-transformed and quantized version of the input.
"""
assert len(embeddings_batch.shape) == 2, (
'Expected 2-d batch, got %r' % (embeddings_batch.shape,))
assert embeddings_batch.shape[1] == EMBEDDING_SIZE, (
'Bad batch shape: %r' % (embeddings_batch.shape,))
# Apply PCA.
# - Embeddings come in as [batch_size, embedding_size].
# - Transpose to [embedding_size, batch_size].
# - Subtract pca_means column vector from each column.
# - Premultiply by PCA matrix of shape [output_dims, input_dims]
# where both are equal to embedding_size in our case.
# - Transpose result back to [batch_size, embedding_size].
pca_applied = np.dot(self._pca_matrix,
(embeddings_batch.T - self._pca_means)).T
# Quantize by:
# - clipping to [min, max] range
clipped_embeddings = np.clip(
pca_applied, QUANTIZE_MIN_VAL,
QUANTIZE_MAX_VAL)
# - convert to 8-bit in range [0.0, 255.0]
quantized_embeddings = (
(clipped_embeddings - QUANTIZE_MIN_VAL) *
(255.0 /
(QUANTIZE_MAX_VAL - QUANTIZE_MIN_VAL)))
# - cast 8-bit float to uint8
quantized_embeddings = quantized_embeddings.astype(np.uint8)
return quantized_embeddings
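# Usage sketch for the Postprocessor above. The ``.npz`` path below is
# hypothetical; any file holding the released AudioSet PCA parameters
# (EMBEDDING_SIZE == 128) would behave the same way.
if __name__ == "__main__":
    import numpy as np  # repeated so the sketch is self-contained

    pproc = Postprocessor("vggish_pca_params.npz")  # hypothetical local path
    raw_batch = np.random.rand(8, 128).astype(np.float32)  # stand-in for raw VGGish output
    quantized = pproc.postprocess(raw_batch)
    # The batch keeps its [batch_size, embedding_size] shape but is now uint8.
    assert quantized.shape == (8, 128)
    assert quantized.dtype == np.uint8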
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Post-process embeddings from VGGish."""
import numpy as np
from vggish.vggish_params import *
class Postprocessor(object):
"""Post-processes VGGish embeddings.
The initial release of AudioSet included 128-D VGGish embeddings for each
segment of AudioSet. These released embeddings were produced by applying
a PCA transformation (technically, a whitening transform is included as well)
and 8-bit quantization to the raw embedding output from VGGish, in order to
stay compatible with the YouTube-8M project which provides visual embeddings
in the same format for a large set of YouTube videos. This class implements
the same PCA (with whitening) and quantization transformations.
"""
def __init__(self, pca_params_npz_path):
"""Constructs a postprocessor.
Args:
pca_params_npz_path: Path to a NumPy-format .npz file that
contains the PCA parameters used in postprocessing.
"""
params = np.load(pca_params_npz_path)
self._pca_matrix = params[PCA_EIGEN_VECTORS_NAME]
# Load means into a column vector for easier broadcasting later.
self._pca_means = params[PCA_MEANS_NAME].reshape(-1, 1)
assert self._pca_matrix.shape == (
EMBEDDING_SIZE, EMBEDDING_SIZE), (
'Bad PCA matrix shape: %r' % (self._pca_matrix.shape,))
assert self._pca_means.shape == (EMBEDDING_SIZE, 1), (
'Bad PCA means shape: %r' % (self._pca_means.shape,))
def postprocess(self, embeddings_batch):
"""Applies postprocessing to a batch of embeddings.
Args:
embeddings_batch: An nparray of shape [batch_size, embedding_size]
containing output from the embedding layer of VGGish.
Returns:
An nparray of the same shape as the input but of type uint8,
containing the PCA-transformed and quantized version of the input.
"""
assert len(embeddings_batch.shape) == 2, (
'Expected 2-d batch, got %r' % (embeddings_batch.shape,))
assert embeddings_batch.shape[1] == EMBEDDING_SIZE, (
'Bad batch shape: %r' % (embeddings_batch.shape,))
# Apply PCA.
# - Embeddings come in as [batch_size, embedding_size].
# - Transpose to [embedding_size, batch_size].
# - Subtract pca_means column vector from each column.
# - Premultiply by PCA matrix of shape [output_dims, input_dims]
# where both are equal to embedding_size in our case.
# - Transpose result back to [batch_size, embedding_size].
pca_applied = np.dot(self._pca_matrix,
(embeddings_batch.T - self._pca_means)).T
# Quantize by:
# - clipping to [min, max] range
clipped_embeddings = np.clip(
pca_applied, QUANTIZE_MIN_VAL,
QUANTIZE_MAX_VAL)
# - convert to 8-bit in range [0.0, 255.0]
quantized_embeddings = (
(clipped_embeddings - QUANTIZE_MIN_VAL) *
(255.0 /
(QUANTIZE_MAX_VAL - QUANTIZE_MIN_VAL)))
# - cast 8-bit float to uint8
quantized_embeddings = quantized_embeddings.astype(np.uint8)
return quantized_embeddings
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.util import is_training_available
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
@pytest.mark.skipif(
not is_training_available(), reason='Sentence Transformers was not installed with the `["train"]` extra.'
)
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
from __future__ import annotations
import pytest
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
@pytest.mark.parametrize(
("revision", "expected_base_revision"),
[
("f3cb857cba53019a20df283396bcca179cf051a4", "f3cb857cba53019a20df283396bcca179cf051a4"),
("f3cb857", "f3cb857"),
("main", "valid-revision"),
(None, "valid-revision"),
],
)
def test_model_card_data(revision, expected_base_revision) -> None:
model_name = "sentence-transformers-testing/stsb-bert-tiny-safetensors"
model = SentenceTransformer(model_name, revision=revision)
assert model.model_card_data.base_model == model_name
if expected_base_revision == "valid-revision":
assert model.model_card_data.base_model_revision
assert len(model.model_card_data.base_model_revision) == 40
else:
assert model.model_card_data.base_model_revision == expected_base_revision
def test_generated_from_trainer_tag(stsb_bert_tiny_model: SentenceTransformer) -> None:
model = stsb_bert_tiny_model
assert "generated_from_trainer" not in model.model_card_data.tags
SentenceTransformerTrainer(model)
assert "generated_from_trainer" in model.model_card_data.tags
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
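# Standalone sketch of the stepping policy implemented above: schedulers with
# ``by_epoch=False`` advance once per training iteration, the rest once per
# epoch. ``_FakeScheduler`` is an illustrative stand-in, not an mmengine class.
class _FakeScheduler:
    def __init__(self, by_epoch: bool) -> None:
        self.by_epoch = by_epoch
        self.steps = 0

    def step(self) -> None:
        self.steps += 1


_demo_schedulers = [_FakeScheduler(by_epoch=False), _FakeScheduler(by_epoch=True)]
for _ in range(10):  # ten iterations of a single epoch
    for _sched in _demo_schedulers:
        if not _sched.by_epoch:
            _sched.step()
for _sched in _demo_schedulers:  # end of the epoch
    if _sched.by_epoch:
        _sched.step()
assert [s.steps for s in _demo_schedulers] == [10, 1]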
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
|
import pathlib
from typing import Any, BinaryIO, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> list[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: tuple[str, BinaryIO]) -> list[tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: tuple[np.ndarray, np.ndarray]) -> dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: list[IterDataPipe]) -> IterDataPipe[dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
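# Note on the ``% 10`` in ``_prepare_sample`` above: the SVHN ``.mat`` files
# label the digit "0" as class 10, so the modulo folds 10 back to 0 while
# leaving 1-9 unchanged. Quick sanity check (pure Python, no download needed):
assert [label % 10 for label in range(1, 11)] == [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]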
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[ImageDoc] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
da3 = DocumentArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
from typing import Dict, List, Optional, Set
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import Image
from docarray.utils.reduce import reduce, reduce_all
class InnerDoc(BaseDocument):
integer: int
inner_list: List
class MMDoc(BaseDocument):
text: str = ''
price: int = 0
categories: Optional[List[str]] = None
image: Optional[Image] = None
matches: Optional[DocumentArray] = None
matches_with_same_id: Optional[DocumentArray] = None
opt_int: Optional[int] = None
test_set: Optional[Set] = None
inner_doc: Optional[InnerDoc] = None
test_dict: Optional[Dict] = None
@pytest.fixture
def doc1():
return MMDoc(
text='hey here',
categories=['a', 'b', 'c'],
price=10,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'a'},
inner_doc=InnerDoc(integer=2, inner_list=['c', 'd']),
test_dict={'a': 0, 'b': 2, 'd': 4, 'z': 3},
)
@pytest.fixture
def doc2(doc1):
return MMDoc(
id=doc1.id,
text='hey here 2',
categories=['d', 'e', 'f'],
price=5,
opt_int=5,
matches=DocumentArray[MMDoc]([MMDoc()]),
matches_with_same_id=DocumentArray[MMDoc](
[MMDoc(id='a', matches=DocumentArray[MMDoc]([MMDoc()]))]
),
test_set={'a', 'b'},
inner_doc=InnerDoc(integer=3, inner_list=['a', 'b']),
test_dict={'a': 10, 'b': 10, 'c': 3, 'z': None},
)
def test_reduce_different_ids():
da1 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
da2 = DocumentArray[MMDoc]([MMDoc() for _ in range(10)])
result = reduce(da1, da2)
assert len(result) == 20
# da1 is changed in place (no extra memory)
assert len(da1) == 20
def test_reduce(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
result = reduce(da1, da2)
assert len(result) == 3
# da1 is changed in place (no extra memory)
assert len(da1) == 3
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == ['a', 'b', 'c', 'd', 'e', 'f']
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b']
def test_reduce_all(doc1, doc2):
da1 = DocumentArray[MMDoc]([doc1, MMDoc()])
da2 = DocumentArray[MMDoc]([MMDoc(), doc2])
da3 = DocumentArray[MMDoc]([MMDoc(), MMDoc(), doc1])
result = reduce_all([da1, da2, da3])
assert len(result) == 5
# da1 is changed in place (no extra memory)
assert len(da1) == 5
merged_doc = result[0]
assert merged_doc.text == 'hey here 2'
assert merged_doc.categories == [
'a',
'b',
'c',
'd',
'e',
'f',
'a',
'b',
'c',
'd',
'e',
'f',
]
assert len(merged_doc.matches) == 2
assert merged_doc.opt_int == 5
assert merged_doc.price == 5
assert merged_doc.test_set == {'a', 'b'}
assert len(merged_doc.matches_with_same_id) == 1
assert len(merged_doc.matches_with_same_id[0].matches) == 2
assert merged_doc.inner_doc.integer == 3
assert merged_doc.inner_doc.inner_list == ['c', 'd', 'a', 'b', 'c', 'd', 'a', 'b']
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
EXPECTED_TEXTS = [
"Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
]
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, revision="refs/pr/1").to(
torch_device
)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Helium model."""
import unittest
from transformers import AutoModelForCausalLM, AutoTokenizer, HeliumConfig, is_torch_available
from transformers.testing_utils import (
require_read_token,
require_torch,
slow,
torch_device,
)
from ...test_configuration_common import ConfigTester
from ..gemma.test_modeling_gemma import GemmaModelTest, GemmaModelTester
if is_torch_available():
import torch
from transformers import (
HeliumForCausalLM,
HeliumForSequenceClassification,
HeliumForTokenClassification,
HeliumModel,
)
class HeliumModelTester(GemmaModelTester):
if is_torch_available():
config_class = HeliumConfig
model_class = HeliumModel
for_causal_lm_class = HeliumForCausalLM
for_sequence_class = HeliumForSequenceClassification
for_token_class = HeliumForTokenClassification
@require_torch
class HeliumModelTest(GemmaModelTest, unittest.TestCase):
all_model_classes = (
(HeliumModel, HeliumForCausalLM, HeliumForSequenceClassification, HeliumForTokenClassification)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": HeliumModel,
"text-classification": HeliumForSequenceClassification,
"token-classification": HeliumForTokenClassification,
"text-generation": HeliumForCausalLM,
"zero-shot": HeliumForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
_is_stateful = True
model_split_percents = [0.5, 0.6]
def setUp(self):
self.model_tester = HeliumModelTester(self)
self.config_tester = ConfigTester(self, config_class=HeliumConfig, hidden_size=37)
@slow
# @require_torch_gpu
class HeliumIntegrationTest(unittest.TestCase):
input_text = ["Hello, today is a great day to"]
@require_read_token
def test_model_2b(self):
model_id = "kyutai/helium-1-preview"
EXPECTED_TEXTS = [
"Hello, today is a great day to start a new project. I have been working on a new project for a while now and I have"
]
model = AutoModelForCausalLM.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, revision="refs/pr/1"
).to(torch_device)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision="refs/pr/1")
inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
self.assertEqual(output_text, EXPECTED_TEXTS)
|
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
return batch.stack()
def test_len(batch):
assert len(batch) == 10
def test_getitem(batch):
for i in range(len(batch)):
print(i)
assert (batch[i].tensor == torch.zeros(3, 224, 224)).all()
def test_iterator(batch):
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_stack_setter(batch):
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(Document):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
def test_stack(batch):
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_convert_to_da(batch):
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch = batch.stack()
da = batch.unstack()
for doc in da:
assert (doc.img.tensor == torch.zeros(3, 224, 224)).all()
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
def test_proto_stacked_mode_numpy():
class MyDoc(Document):
tensor: NdArray[3, 224, 224]
da = DocumentArray[MyDoc](
[MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
da = da.stack()
da.from_protobuf(da.to_protobuf())
def test_stack_call():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
da = da.stack()
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
def test_context_manager():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
da = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
with da.stacked_mode() as da:
assert len(da) == 10
assert da.tensor.shape == (10, 3, 224, 224)
da.tensor = torch.ones(10, 3, 224, 224)
tensor = da.tensor
assert isinstance(tensor, list)
for doc in da:
assert (doc.tensor == torch.ones(3, 224, 224)).all()
|
from typing import Optional
import numpy as np
import pytest
import torch
from docarray import Document, DocumentArray
from docarray.typing import NdArray, TorchTensor
def test_stack():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
assert batch._columns['tensor'].data_ptr() == batch.tensor.data_ptr()
for doc, tensor in zip(batch, batch.tensor):
assert doc.tensor.data_ptr() == tensor.data_ptr()
for i in range(len(batch)):
assert batch[i].tensor.data_ptr() == batch.tensor[i].data_ptr()
def test_stack_mod_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch.stack()
assert (
batch._columns['img']._columns['tensor'] == torch.zeros(10, 3, 224, 224)
).all()
assert (batch.img.tensor == torch.zeros(10, 3, 224, 224)).all()
assert (
batch._columns['img']._columns['tensor'].data_ptr()
== batch.img.tensor.data_ptr()
)
def test_unstack():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
batch.unstack()
for doc in batch:
assert (doc.tensor == torch.zeros(3, 224, 224)).all()
def test_unstack_nested_document():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
class MMdoc(Document):
img: Image
batch = DocumentArray[MMdoc](
[MMdoc(img=Image(tensor=torch.zeros(3, 224, 224))) for _ in range(10)]
)
batch.stack()
batch.unstack()
def test_stack_runtime_error():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
with pytest.raises(RuntimeError):
batch.append([])
def test_context_stack():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
assert not (batch.is_stacked())
with batch.stacked_mode():
assert batch.is_stacked()
assert not (batch.is_stacked())
def test_context_not_stack():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
assert batch.is_stacked()
with batch.unstacked_mode():
assert not (batch.is_stacked())
assert batch.is_stacked()
def test_stack_setter():
class Image(Document):
tensor: TorchTensor[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
batch.tensor = torch.ones(10, 3, 224, 224)
assert (batch.tensor == torch.ones(10, 3, 224, 224)).all()
def test_stack_optional():
class Image(Document):
tensor: Optional[TorchTensor[3, 224, 224]]
batch = DocumentArray[Image](
[Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)]
)
batch.stack()
assert (batch._columns['tensor'] == torch.zeros(10, 3, 224, 224)).all()
assert (batch.tensor == torch.zeros(10, 3, 224, 224)).all()
def test_stack_numpy():
class Image(Document):
tensor: NdArray[3, 224, 224]
batch = DocumentArray[Image](
[Image(tensor=np.zeros((3, 224, 224))) for _ in range(10)]
)
batch.stack()
assert (batch._columns['tensor'] == np.zeros((10, 3, 224, 224))).all()
assert (batch.tensor == np.zeros((10, 3, 224, 224))).all()
assert batch.tensor.ctypes.data == batch._columns['tensor'].ctypes.data
batch.unstack()
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .ddq_detr_head import DDQDETRHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .grounding_dino_head import GroundingDINOHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead', 'DDQDETRHead', 'GroundingDINOHead'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .atss_vlfusion_head import ATSSVLFusionHead
from .autoassign_head import AutoAssignHead
from .boxinst_head import BoxInstBboxHead, BoxInstMaskHead
from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
from .centernet_head import CenterNetHead
from .centernet_update_head import CenterNetUpdateHead
from .centripetal_head import CentripetalHead
from .condinst_head import CondInstBboxHead, CondInstMaskHead
from .conditional_detr_head import ConditionalDETRHead
from .corner_head import CornerHead
from .dab_detr_head import DABDETRHead
from .ddod_head import DDODHead
from .ddq_detr_head import DDQDETRHead
from .deformable_detr_head import DeformableDETRHead
from .detr_head import DETRHead
from .dino_head import DINOHead
from .embedding_rpn_head import EmbeddingRPNHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .lad_head import LADHead
from .ld_head import LDHead
from .mask2former_head import Mask2FormerHead
from .maskformer_head import MaskFormerHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .rtmdet_head import RTMDetHead, RTMDetSepBNHead
from .rtmdet_ins_head import RTMDetInsHead, RTMDetInsSepBNHead
from .sabl_retina_head import SABLRetinaHead
from .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead
from .solov2_head import SOLOV2Head
from .ssd_head import SSDHead
from .tood_head import TOODHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet
from .yolo_head import YOLOV3Head
from .yolof_head import YOLOFHead
from .yolox_head import YOLOXHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTProtonet', 'YOLOV3Head', 'PAAHead', 'SABLRetinaHead',
'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead', 'CascadeRPNHead',
'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead', 'DETRHead', 'YOLOFHead',
'DeformableDETRHead', 'CenterNetHead', 'YOLOXHead', 'SOLOHead',
'DecoupledSOLOHead', 'DecoupledSOLOLightHead', 'SOLOV2Head', 'LADHead',
'TOODHead', 'MaskFormerHead', 'Mask2FormerHead', 'DDODHead',
'CenterNetUpdateHead', 'RTMDetHead', 'RTMDetSepBNHead', 'CondInstBboxHead',
'CondInstMaskHead', 'RTMDetInsHead', 'RTMDetInsSepBNHead',
'BoxInstBboxHead', 'BoxInstMaskHead', 'ConditionalDETRHead', 'DINOHead',
'ATSSVLFusionHead', 'DABDETRHead', 'DDQDETRHead'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FirestoreChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FirestoreChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import FirestoreChatMessageHistory
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FirestoreChatMessageHistory": "langchain_community.chat_message_histories"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"FirestoreChatMessageHistory",
]
|
from jina.serve.runtimes.gateway.gateway import BaseGateway
from jina.serve.runtimes.servers.grpc import GRPCServer
__all__ = ['GRPCGateway']
class GRPCGateway(GRPCServer, BaseGateway):
"""
:class:`GRPCGateway` is a GRPCServer that can be loaded from YAML as any other Gateway
"""
pass
|
from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
__all__ = ['GRPCGateway']
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory,
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'.",
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
],
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
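# Sketch of the output-key resolution performed by ``_get_input_output`` above,
# restated as a standalone function (illustration only): a single output is
# taken as-is, multiple outputs fall back to the "output" key with a warning,
# and anything else raises unless ``output_key`` was set explicitly.
def _resolve_output_key(outputs: dict) -> str:
    if len(outputs) == 1:
        return next(iter(outputs))
    if "output" in outputs:
        return "output"  # the class above also warns before using this fallback
    raise ValueError(f"Got multiple output keys: {outputs.keys()}")


assert _resolve_output_key({"answer": "42"}) == "answer"
assert _resolve_output_key({"output": "42", "sources": []}) == "output"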
|
import warnings
from abc import ABC
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.chat_history import (
BaseChatMessageHistory,
InMemoryChatMessageHistory,
)
from langchain_core.memory import BaseMemory
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field
from langchain.memory.utils import get_prompt_input_key
@deprecated(
since="0.3.1",
removal="1.0.0",
message=(
"Please see the migration guide at: "
"https://python.langchain.com/docs/versions/migrating_memory/"
),
)
class BaseChatMemory(BaseMemory, ABC):
"""Abstract base class for chat memory.
**ATTENTION** This abstraction was created prior to when chat models had
native tool calling capabilities.
It does **NOT** support native tool calling capabilities for chat models and
will fail SILENTLY if used with a chat model that has native tool calling.
DO NOT USE THIS ABSTRACTION FOR NEW CODE.
"""
chat_memory: BaseChatMessageHistory = Field(
default_factory=InMemoryChatMessageHistory
)
output_key: Optional[str] = None
input_key: Optional[str] = None
return_messages: bool = False
def _get_input_output(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> tuple[str, str]:
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) == 1:
output_key = next(iter(outputs.keys()))
elif "output" in outputs:
output_key = "output"
warnings.warn(
f"'{self.__class__.__name__}' got multiple output keys:"
f" {outputs.keys()}. The default 'output' key is being used."
f" If this is not desired, please manually set 'output_key'."
)
else:
msg = (
f"Got multiple output keys: {outputs.keys()}, cannot "
f"determine which to store in memory. Please set the "
f"'output_key' explicitly."
)
raise ValueError(msg)
else:
output_key = self.output_key
return inputs[prompt_input_key], outputs[output_key]
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Save context from this conversation to buffer."""
input_str, output_str = self._get_input_output(inputs, outputs)
await self.chat_memory.aadd_messages(
[
HumanMessage(content=input_str),
AIMessage(content=output_str),
]
)
def clear(self) -> None:
"""Clear memory contents."""
self.chat_memory.clear()
async def aclear(self) -> None:
"""Clear memory contents."""
await self.chat_memory.aclear()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
init_default_scope(cfg.get('default_scope', 'mmdet'))
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
register_all_modules()
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise NotImplementedError(
"FlopsLoss is not intended to be used directly. Use it as a regulizer within the SpladeLoss class."
)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor]) -> torch.Tensor:
if self.threshold is not None:
l0_norm = (embeddings != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings = embeddings * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
from __future__ import annotations
from collections.abc import Iterable
import torch
from torch import Tensor, nn
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class FlopsLoss(nn.Module):
def __init__(self, model: SparseEncoder, threshold: float | None = None) -> None:
"""
FlopsLoss implements a regularization technique to promote sparsity in sparse encoder models.
It calculates the squared L2 norm of the mean embedding vector, which helps reduce the number of floating-point
operations (FLOPs) required during inference by encouraging more zero values in the embeddings.
It can use a threshold to ignore embeddings with too few non-zero elements.
This loss is used as a regularization component within other losses like :class:`SpladeLoss` rather than
being used as a standalone loss function.
Args:
model: SparseEncoder model to be regularized
threshold: Optional threshold for the number of non-zero elements in the embeddings.
If specified, only embeddings with more than this number of non-zero elements will be considered.
This can help to ignore embeddings that are too sparse and may not contribute meaningfully to the loss.
References:
- For further details, see: https://arxiv.org/pdf/2004.05665 for the general FLOPS loss and https://arxiv.org/pdf/2504.14839 for FLOPS with thresholds, a.k.a. FLOPS with l0 masking.
Relations:
- Used as a component within :class:`SpladeLoss` to regularize both query and document embeddings
Example:
- This loss is typically used within the :class:`SpladeLoss` class, which combines it with other loss components.
"""
super().__init__()
self.model = model
self.threshold = threshold
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
# Compute the embeddings and distribute them to anchor and candidates (positive and optionally negatives)
embeddings = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
return self.compute_loss_from_embeddings(embeddings)
def compute_loss_from_embeddings(self, embeddings: list[torch.Tensor], embeddings_type: str) -> torch.Tensor:
if embeddings_type == "query":
embeddings_to_use = embeddings[0] # (batch_size, embedding_dim)
else:
embeddings_to_use = torch.cat(embeddings[1:]) # (batch_size * (1 + num_negatives), embedding_dim)
if self.threshold is not None:
l0_norm = (embeddings_to_use != 0).sum(dim=1)
mask = (l0_norm > self.threshold).float()
embeddings_to_use = embeddings_to_use * mask.unsqueeze(1)
return torch.sum(torch.mean(embeddings_to_use, dim=0) ** 2)
@property
def citation(self) -> str:
return """
@article{paria2020minimizing,
title={Minimizing flops to learn efficient sparse representations},
author={Paria, Biswajit and Yeh, Chih-Kuan and Yen, Ian EH and Xu, Ning and Ravikumar, Pradeep and P{\'o}czos, Barnab{\'a}s},
journal={arXiv preprint arXiv:2004.05665},
year={2020}
}
"""
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
import torchaudio._torchaudio_ffmpeg
except OSError as err:
raise ImportError(
"Stream API requires FFmpeg libraries (libavformat and such). Please install FFmpeg 4."
) from err
try:
torch.ops.torchaudio.ffmpeg_init()
except RuntimeError as err:
raise RuntimeError(
"Stream API requires FFmpeg binding. Please set USE_FFMPEG=1 when building from source."
) from err
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
from . import _stream_reader
item = getattr(_stream_reader, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
"StreamReaderSourceVideoStream",
"StreamReaderOutputStream",
]
def _init_extension():
import torch
import torchaudio
try:
torchaudio._extension._load_lib("libtorchaudio_ffmpeg")
except OSError as err:
raise ImportError(
"Stream API requires FFmpeg libraries (libavformat and such). Please install FFmpeg 4."
) from err
try:
torch.ops.torchaudio.ffmpeg_init()
except RuntimeError as err:
raise RuntimeError(
"Stream API requires FFmpeg binding. Please set USE_FFMPEG=1 when building from source."
) from err
global _INITIALIZED
_INITIALIZED = True
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
if not _INITIALIZED:
_init_extension()
from . import _stream_reader
item = getattr(_stream_reader, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
def __dir__():
return sorted(__all__ + _LAZILY_IMPORTED)
__all__ = []
|
from enum import Enum
from typing import Any, Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
) -> None:
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
        super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self) -> Dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
from enum import Enum
from typing import Dict, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class TripletDistanceMetric(Enum):
"""The metric for the triplet loss"""
COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
class TripletLoss(nn.Module):
def __init__(
self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5
):
"""
This class implements triplet loss. Given a triplet of (anchor, positive, negative),
the loss minimizes the distance between anchor and positive while it maximizes the distance
        between anchor and negative. It computes the following loss function:
``loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0)``.
        Margin is an important hyperparameter and needs to be tuned accordingly.
Args:
model: SentenceTransformerModel
distance_metric: Function to compute distance between two
embeddings. The class TripletDistanceMetric contains
                common distance metrics that can be used.
triplet_margin: The negative should be at least this much
further away from the anchor than the positive.
References:
- For further details, see: https://en.wikipedia.org/wiki/Triplet_loss
Requirements:
1. (anchor, positive, negative) triplets
Inputs:
+---------------------------------------+--------+
| Texts | Labels |
+=======================================+========+
| (anchor, positive, negative) triplets | none |
+---------------------------------------+--------+
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"anchor": ["It's nice weather outside today.", "He drove to work."],
"positive": ["It's so sunny.", "He took the car to the office."],
"negative": ["It's quite rainy, sadly.", "She walked to the store."],
})
loss = losses.TripletLoss(model=model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
        super().__init__()
self.model = model
self.distance_metric = distance_metric
self.triplet_margin = triplet_margin
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
rep_anchor, rep_pos, rep_neg = reps
distance_pos = self.distance_metric(rep_anchor, rep_pos)
distance_neg = self.distance_metric(rep_anchor, rep_neg)
losses = F.relu(distance_pos - distance_neg + self.triplet_margin)
return losses.mean()
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(TripletDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "TripletDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "triplet_margin": self.triplet_margin}
@property
def citation(self) -> str:
return """
@misc{hermans2017defense,
title={In Defense of the Triplet Loss for Person Re-Identification},
author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
year={2017},
eprint={1703.07737},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
"""
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given queries, a corpus and a mapping with relevant documents, the SparseMSEEvaluator computes different MSE metrics.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity: Active Dimensions: 55.6, Sparsity Ratio: 0.9982
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseMSEEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
student_model = SparseEncoder("prithivida/Splade_PP_en_v1")
teacher_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load any dataset with some texts
dataset = load_dataset("sentence-transformers/stsb", split="validation")
sentences = dataset["sentence1"] + dataset["sentence2"]
# Given queries, a corpus and a mapping with relevant documents, the SparseMSEEvaluator computes different MSE metrics.
mse_evaluator = SparseMSEEvaluator(
source_sentences=sentences,
target_sentences=sentences,
teacher_model=teacher_model,
name="stsb-dev",
)
results = mse_evaluator(student_model)
"""
MSE evaluation (lower = better) on the stsb-dev dataset:
MSE (*100): 0.035540
Model Sparsity Stats: Row Non-Zero Mean: 55.60933303833008, Row Sparsity Mean: 0.9981780648231506
"""
# Print the results
print(f"Primary metric: {mse_evaluator.primary_metric}")
# => Primary metric: stsb-dev_negative_mse
print(f"Primary metric value: {results[mse_evaluator.primary_metric]:.4f}")
# => Primary metric value: -0.0355
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
    input_size: int, input_iterable: list[int], expected_output: list[list[int]]
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
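# Hedged standalone stand-in for the batching behaviour exercised above
# (_batches is invented here, not the langchain_core implementation):
from itertools import islice
def _batches(size, iterable):
    it = iter(iterable)
    while chunk := list(islice(it, size)):
        yield chunk
assert list(_batches(2, [1, 2, 3, 4, 5])) == [[1, 2], [3, 4], [5]]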
|
import pytest
from langchain_core.utils.iter import batch_iterate
@pytest.mark.parametrize(
("input_size", "input_iterable", "expected_output"),
[
(2, [1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(3, [10, 20, 30, 40, 50], [[10, 20, 30], [40, 50]]),
(1, [100, 200, 300], [[100], [200], [300]]),
(4, [], []),
],
)
def test_batch_iterate(
    input_size: int, input_iterable: list[int], expected_output: list[list[int]]
) -> None:
"""Test batching function."""
assert list(batch_iterate(input_size, input_iterable)) == expected_output
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.structures import ReIDDataSample
from mmdet.utils import register_all_modules
class TestLinearReIDHead(TestCase):
@classmethod
def setUpClass(cls) -> None:
register_all_modules()
head_cfg = dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=128,
fc_channels=64,
out_channels=32,
num_classes=2,
loss_cls=dict(type='mmpretrain.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))
cls.head = MODELS.build(head_cfg)
cls.inputs = (torch.rand(4, 128), torch.rand(4, 128))
cls.data_samples = [
ReIDDataSample().set_gt_label(label) for label in (0, 0, 1, 1)
]
def test_forward(self):
outputs = self.head(self.inputs)
assert outputs.shape == (4, 32)
def test_loss(self):
losses = self.head.loss(self.inputs, self.data_samples)
assert losses.keys() == {'triplet_loss', 'ce_loss', 'accuracy_top-1'}
assert losses['ce_loss'].item() >= 0
assert losses['triplet_loss'].item() >= 0
def test_predict(self):
predictions = self.head.predict(self.inputs, self.data_samples)
for pred in predictions:
assert isinstance(pred, ReIDDataSample)
assert isinstance(pred.pred_feature, torch.Tensor)
assert isinstance(pred.gt_label.label, torch.Tensor)
assert pred.pred_feature.shape == (32, )
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmdet.registry import MODELS
from mmdet.structures import ReIDDataSample
from mmdet.utils import register_all_modules
class TestLinearReIDHead(TestCase):
@classmethod
def setUpClass(cls) -> None:
register_all_modules()
head_cfg = dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=128,
fc_channels=64,
out_channels=32,
num_classes=2,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))
cls.head = MODELS.build(head_cfg)
cls.inputs = (torch.rand(4, 128), torch.rand(4, 128))
cls.data_samples = [
ReIDDataSample().set_gt_label(label) for label in (0, 0, 1, 1)
]
def test_forward(self):
outputs = self.head(self.inputs)
assert outputs.shape == (4, 32)
def test_loss(self):
losses = self.head.loss(self.inputs, self.data_samples)
assert losses.keys() == {'triplet_loss', 'ce_loss', 'accuracy_top-1'}
assert losses['ce_loss'].item() >= 0
assert losses['triplet_loss'].item() >= 0
def test_predict(self):
predictions = self.head.predict(self.inputs, self.data_samples)
for pred in predictions:
assert isinstance(pred, ReIDDataSample)
assert isinstance(pred.pred_feature, torch.Tensor)
assert isinstance(pred.gt_label.label, torch.Tensor)
assert pred.pred_feature.shape == (32, )
|
from __future__ import annotations
from collections.abc import Collection
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "labels", "score", "scores"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
# If the label column is a list/tuple/collection, we create a list of tensors
if isinstance(features[0][label_column], Collection):
batch["label"] = [torch.tensor(row[label_column]) for row in features]
else:
# Otherwise, if it's e.g. single values, we create a tensor
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
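# Hedged toy run of the label-extraction branch above (feature rows invented):
import torch
toy_features = [
    {"question": "q1", "answer": "a1", "label": 0.2},
    {"question": "q2", "answer": "a2", "label": 0.9},
]
toy_labels = torch.tensor([row["label"] for row in toy_features])  # tensor([0.2000, 0.9000])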
|
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Callable
import torch
from sentence_transformers.data_collator import SentenceTransformerDataCollator
@dataclass
class CrossEncoderDataCollator(SentenceTransformerDataCollator):
"""Collator for a CrossEncoder model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/sentence_transformer/training_overview.html
It is important that the columns are in the expected order. For example, if your dataset has columns
"answer", "question" in that order, then the MultipleNegativesRankingLoss will consider
"answer" as the anchor and "question" as the positive, and it will (unexpectedly) optimize for
"given the answer, what is the question?".
"""
tokenize_fn: Callable
valid_label_columns: list[str] = field(default_factory=lambda: ["label", "labels", "score", "scores"])
_warned_columns: set[tuple[str]] = field(default_factory=set, init=False, repr=False)
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
column_names = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in column_names:
column_names.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# TODO:
# if tuple(column_names) not in self._warned_columns:
# self.maybe_warn_about_column_order(column_names)
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in column_names:
batch["label"] = torch.tensor([row[label_column] for row in features])
column_names.remove(label_column)
break
for column_name in column_names:
# If the prompt length has been set, we should add it to the batch
if column_name.endswith("_prompt_length") and column_name[: -len("_prompt_length")] in column_names:
batch[column_name] = torch.tensor([row[column_name] for row in features], dtype=torch.int)
continue
batch[column_name] = [row[column_name] for row in features]
return batch
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array import DocVec
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image]([Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)])
return batch.stack()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDoc):
tensor: NdArray[3, 224, 224]
da = DocList[MyDoc]([MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)])
da = da.stack()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDoc):
image: NdArray
da = DocList[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocVec.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocVec)
|
import numpy as np
import pytest
import torch
from docarray import BaseDoc, DocArray
from docarray.array import DocArrayStacked
from docarray.typing import NdArray, TorchTensor
@pytest.fixture()
def batch():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocArray[Image]([Image(tensor=torch.zeros(3, 224, 224)) for _ in range(10)])
return batch.stack()
@pytest.mark.proto
def test_proto_stacked_mode_torch(batch):
batch.from_protobuf(batch.to_protobuf())
@pytest.mark.proto
def test_proto_stacked_mode_numpy():
class MyDoc(BaseDoc):
tensor: NdArray[3, 224, 224]
da = DocArray[MyDoc]([MyDoc(tensor=np.zeros((3, 224, 224))) for _ in range(10)])
da = da.stack()
da.from_protobuf(da.to_protobuf())
@pytest.mark.proto
def test_stacked_proto():
class CustomDocument(BaseDoc):
image: NdArray
da = DocArray[CustomDocument](
[CustomDocument(image=np.zeros((3, 224, 224))) for _ in range(10)]
).stack()
da2 = DocArrayStacked.from_protobuf(da.to_protobuf())
assert isinstance(da2, DocArrayStacked)
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogView3PlusTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogView3PlusTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"original_size": original_size,
"target_size": target_size,
"crop_coords": crop_coords,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"in_channels": 4,
"num_layers": 2,
"attention_head_dim": 4,
"num_attention_heads": 2,
"out_channels": 4,
"text_embed_dim": 8,
"time_embed_dim": 8,
"condition_dim": 2,
"pos_embed_max_size": 8,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogView3PlusTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import CogView3PlusTransformer2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = CogView3PlusTransformer2DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
model_split_percents = [0.7, 0.6, 0.6]
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"original_size": original_size,
"target_size": target_size,
"crop_coords": crop_coords,
"timestep": timestep,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"patch_size": 2,
"in_channels": 4,
"num_layers": 2,
"attention_head_dim": 4,
"num_attention_heads": 2,
"out_channels": 4,
"text_embed_dim": 8,
"time_embed_dim": 8,
"condition_dim": 2,
"pos_embed_max_size": 8,
"sample_size": 8,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"CogView3PlusTransformer2DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocols=protocol).add()
with f:
dry_run = f.is_flow_ready()
dry_run_negative = f.is_flow_ready()
assert dry_run
assert not dry_run_negative
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
@pytest.mark.parametrize('show_table', [True, False])
def test_profiling(protocol, show_table):
f = Flow(protocol=protocol).add(name='hello').add(name='world')
with f:
results = f.profiling(show_table=show_table)
assert results
assert 'hello' in results
assert 'world' in results
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_profiling_async(protocol):
f = Flow(protocol=protocol, asyncio=True).add(name='hello').add(name='world')
with f:
results = await f.profiling()
assert results
assert 'hello' in results
assert 'world' in results
|
import pytest
from jina import Flow
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
def test_dry_run(protocol):
f = Flow(protocol=protocol).add()
with f:
dry_run = f.is_flow_ready()
dry_run_negative = f.is_flow_ready()
assert dry_run
assert not dry_run_negative
@pytest.mark.parametrize('protocol', ['grpc', 'http', 'websocket'])
@pytest.mark.parametrize('show_table', [True, False])
def test_profiling(protocol, show_table):
f = Flow(protocol=protocol).add(name='hello').add(name='world')
with f:
results = f.profiling(show_table=show_table)
assert results
assert 'hello' in results
assert 'world' in results
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_profiling_async(protocol):
f = Flow(protocol=protocol, asyncio=True).add(name='hello').add(name='world')
with f:
results = await f.profiling()
assert results
assert 'hello' in results
assert 'world' in results
|
"""
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
print("Start encoding corpus...")
start_time = time.time()
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with Seismic.
For more information, please refer to the documentation:
https://github.com/TusKANNy/seismic/blob/main/docs/Guidelines.md
All you need is installing the `pyseismic-lsr` package:
```
pip install pyseismic-lsr
```
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_seismic
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
corpus_index = None
while True:
# 5. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
    # 6. Perform semantic search using Seismic
results, search_time, corpus_index = semantic_search_seismic(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion.com/v1/search"
class NotionLoadDataSchema(BaseModel):
"""Notion load data schema."""
page_ids: Optional[List[str]] = None
database_id: Optional[str] = None
class NotionSearchDataSchema(BaseModel):
"""Notion search data schema."""
query: str
direction: Optional[str] = None
timestamp: Optional[str] = None
value: Optional[str] = None
property: Optional[str] = None
page_size: int = 100
class NotionToolSpec(BaseToolSpec):
"""
Notion tool spec.
Currently a simple wrapper around the data loader.
TODO: add more methods to the Notion spec.
"""
spec_functions = ["load_data", "search_data"]
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with parameters."""
self.reader = NotionPageReader(integration_token=integration_token)
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name."""
if fn_name == "load_data":
return NotionLoadDataSchema
elif fn_name == "search_data":
return NotionSearchDataSchema
else:
raise ValueError(f"Invalid function name: {fn_name}")
def load_data(
self,
page_ids: Optional[List[str]] = None,
database_ids: Optional[List[str]] = None,
) -> str:
"""
Loads content from a set of page ids or database ids.
Don't use this endpoint if you don't know the page ids or database ids.
"""
page_ids = page_ids or []
docs = self.reader.load_data(page_ids=page_ids, database_ids=database_ids)
return "\n".join([doc.get_content() for doc in docs])
def search_data(
self,
query: str,
direction: Optional[str] = None,
timestamp: Optional[str] = None,
value: Optional[str] = None,
property: Optional[str] = None,
page_size: int = 100,
) -> List[Dict[str, Any]]:
"""
Search a list of relevant pages.
Contains metadata for each page (but not the page content).
params:
query: the title of the page or database to search for, which is fuzzy matched.
"""
payload: Dict[str, Any] = {
"query": query,
"page_size": page_size,
}
if direction is not None or timestamp is not None:
payload["sort"] = {}
if direction is not None:
payload["sort"]["direction"] = direction
if timestamp is not None:
payload["sort"]["timestamp"] = timestamp
if value is not None or property is not None:
payload["filter"] = {}
if value is not None:
payload["filter"]["value"] = value
if property is not None:
payload["filter"]["property"] = property
response = requests.post(SEARCH_URL, json=payload, headers=self.reader.headers)
response_json = response.json()
return response_json["results"]
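# Hedged illustration of the payload search_data assembles for a sorted, filtered query
# (all values invented; "descending"/"last_edited_time" are example Notion API settings):
example_payload = {
    "query": "Meeting notes",
    "page_size": 100,
    "sort": {"direction": "descending", "timestamp": "last_edited_time"},
    "filter": {"value": "page", "property": "object"},
}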
|
"""Notion tool spec."""
from typing import Any, Dict, List, Optional, Type
import requests
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.tools.tool_spec.base import SPEC_FUNCTION_TYPE, BaseToolSpec
from llama_index.readers.notion import NotionPageReader
SEARCH_URL = "https://api.notion.com/v1/search"
class NotionLoadDataSchema(BaseModel):
"""Notion load data schema."""
page_ids: Optional[List[str]] = None
database_id: Optional[str] = None
class NotionSearchDataSchema(BaseModel):
"""Notion search data schema."""
query: str
direction: Optional[str] = None
timestamp: Optional[str] = None
value: Optional[str] = None
property: Optional[str] = None
page_size: int = 100
class NotionToolSpec(BaseToolSpec):
"""Notion tool spec.
Currently a simple wrapper around the data loader.
TODO: add more methods to the Notion spec.
"""
spec_functions = ["load_data", "search_data"]
def __init__(self, integration_token: Optional[str] = None) -> None:
"""Initialize with parameters."""
self.reader = NotionPageReader(integration_token=integration_token)
def get_fn_schema_from_fn_name(
self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None
) -> Optional[Type[BaseModel]]:
"""Return map from function name."""
if fn_name == "load_data":
return NotionLoadDataSchema
elif fn_name == "search_data":
return NotionSearchDataSchema
else:
raise ValueError(f"Invalid function name: {fn_name}")
def load_data(
self,
page_ids: Optional[List[str]] = None,
database_ids: Optional[List[str]] = None,
) -> str:
"""Loads content from a set of page ids or database ids.
Don't use this endpoint if you don't know the page ids or database ids.
"""
page_ids = page_ids or []
docs = self.reader.load_data(page_ids=page_ids, database_ids=database_ids)
return "\n".join([doc.get_content() for doc in docs])
def search_data(
self,
query: str,
direction: Optional[str] = None,
timestamp: Optional[str] = None,
value: Optional[str] = None,
property: Optional[str] = None,
page_size: int = 100,
) -> List[Dict[str, Any]]:
"""Search a list of relevant pages.
Contains metadata for each page (but not the page content).
params:
query: the title of the page or database to search for, which is fuzzy matched.
"""
payload: Dict[str, Any] = {
"query": query,
"page_size": page_size,
}
if direction is not None or timestamp is not None:
payload["sort"] = {}
if direction is not None:
payload["sort"]["direction"] = direction
if timestamp is not None:
payload["sort"]["timestamp"] = timestamp
if value is not None or property is not None:
payload["filter"] = {}
if value is not None:
payload["filter"]["value"] = value
if property is not None:
payload["filter"]["property"] = property
response = requests.post(SEARCH_URL, json=payload, headers=self.reader.headers)
response_json = response.json()
return response_json["results"]
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .CachedMultipleNegativesSymmetricRankingLoss import CachedMultipleNegativesSymmetricRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"CachedMultipleNegativesSymmetricRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
# CoSENTLoss must be imported before AnglELoss
from __future__ import annotations
from .CoSENTLoss import CoSENTLoss # isort: skip
from .AdaptiveLayerLoss import AdaptiveLayerLoss
from .AnglELoss import AnglELoss
from .BatchAllTripletLoss import BatchAllTripletLoss
from .BatchHardSoftMarginTripletLoss import BatchHardSoftMarginTripletLoss
from .BatchHardTripletLoss import BatchHardTripletLoss, BatchHardTripletLossDistanceFunction
from .BatchSemiHardTripletLoss import BatchSemiHardTripletLoss
from .CachedGISTEmbedLoss import CachedGISTEmbedLoss
from .CachedMultipleNegativesRankingLoss import CachedMultipleNegativesRankingLoss
from .ContrastiveLoss import ContrastiveLoss, SiameseDistanceMetric
from .ContrastiveTensionLoss import (
ContrastiveTensionDataLoader,
ContrastiveTensionLoss,
ContrastiveTensionLossInBatchNegatives,
)
from .CosineSimilarityLoss import CosineSimilarityLoss
from .DenoisingAutoEncoderLoss import DenoisingAutoEncoderLoss
from .GISTEmbedLoss import GISTEmbedLoss
from .MarginMSELoss import MarginMSELoss
from .Matryoshka2dLoss import Matryoshka2dLoss
from .MatryoshkaLoss import MatryoshkaLoss
from .MegaBatchMarginLoss import MegaBatchMarginLoss
from .MSELoss import MSELoss
from .MultipleNegativesRankingLoss import MultipleNegativesRankingLoss
from .MultipleNegativesSymmetricRankingLoss import MultipleNegativesSymmetricRankingLoss
from .OnlineContrastiveLoss import OnlineContrastiveLoss
from .SoftmaxLoss import SoftmaxLoss
from .TripletLoss import TripletDistanceMetric, TripletLoss
__all__ = [
"AdaptiveLayerLoss",
"CosineSimilarityLoss",
"SoftmaxLoss",
"MultipleNegativesRankingLoss",
"MultipleNegativesSymmetricRankingLoss",
"TripletLoss",
"TripletDistanceMetric",
"MarginMSELoss",
"MatryoshkaLoss",
"Matryoshka2dLoss",
"MSELoss",
"ContrastiveLoss",
"SiameseDistanceMetric",
"CachedGISTEmbedLoss",
"CachedMultipleNegativesRankingLoss",
"ContrastiveTensionLoss",
"ContrastiveTensionLossInBatchNegatives",
"ContrastiveTensionDataLoader",
"CoSENTLoss",
"AnglELoss",
"OnlineContrastiveLoss",
"MegaBatchMarginLoss",
"DenoisingAutoEncoderLoss",
"GISTEmbedLoss",
"BatchHardTripletLoss",
"BatchHardTripletLossDistanceFunction",
"BatchHardSoftMarginTripletLoss",
"BatchSemiHardTripletLoss",
"BatchAllTripletLoss",
]
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, timeout=None) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param timeout: timeout for urlopen. Only relevant if uri is not local
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
urlopen_kwargs = {'timeout': timeout} if timeout is not None else {}
with urllib.request.urlopen(req, **urlopen_kwargs) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'wb') # type: ignore
return file_ctx
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
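# Hedged usage sketch of _uri_to_blob above: a data URI takes the urlopen branch
# (the example URI encodes the ASCII bytes b"hi"; value invented for illustration):
assert _uri_to_blob("data:text/plain;base64,aGk=") == b"hi"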
|
import os
import urllib.parse
import urllib.request
from contextlib import nullcontext
def _uri_to_blob(uri: str, timeout=None) -> bytes:
"""Convert uri to blob
Internally it reads uri into blob.
:param uri: the uri of Document
:param timeout: timeout for urlopen. Only relevant if uri is not local
:return: blob bytes.
"""
if urllib.parse.urlparse(uri).scheme in {'http', 'https', 'data'}:
req = urllib.request.Request(uri, headers={'User-Agent': 'Mozilla/5.0'})
urlopen_kwargs = {'timeout': timeout} if timeout is not None else {}
with urllib.request.urlopen(req, **urlopen_kwargs) as fp:
return fp.read()
elif os.path.exists(uri):
with open(uri, 'rb') as fp:
return fp.read()
else:
raise FileNotFoundError(f'`{uri}` is not a URL or a valid local path')
def _get_file_context(file):
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'wb')
return file_ctx
def _is_uri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return (
(scheme in {'http', 'https'})
or (scheme in {'data'})
or os.path.exists(value)
or os.access(os.path.dirname(value), os.W_OK)
)
def _is_datauri(value: str) -> bool:
scheme = urllib.parse.urlparse(value).scheme
return scheme in {'data'}
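# Hedged quick checks of the scheme helpers above (URI strings invented for illustration):
assert _is_datauri("data:text/plain;base64,aGk=") is True
assert _is_datauri("https://example.com/a.png") is False
assert _is_uri("https://example.com/a.png") is True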
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
super().__init__(
source_sentences=source_sentences,
target_sentences=target_sentences,
teacher_model=teacher_model,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
logger.warning(
"The SparseMSEEvaluator is not handling the mse compute with sparse tensors yet. Memory issues may occur."
)
def __call__(
self,
model: SparseEncoder,
        output_path: str | None = None,
epoch: int = -1,
steps: int = -1,
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=False,
**kwargs,
)
def store_metrics_in_model_card_data(
self,
model: SparseEncoder,
metrics: dict[str, Any],
epoch: int = 0,
step: int = 0,
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from sentence_transformers.evaluation import MSEEvaluator
if TYPE_CHECKING:
import numpy as np
from torch import Tensor
from sentence_transformers.sparse_encoder import SparseEncoder
logger = logging.getLogger(__name__)
class SparseMSEEvaluator(MSEEvaluator):
def __init__(
self,
source_sentences: list[str],
target_sentences: list[str],
teacher_model=None,
show_progress_bar: bool = False,
batch_size: int = 32,
name: str = "",
write_csv: bool = True,
truncate_dim: int | None = None,
):
return super().__init__(
source_sentences=source_sentences,
target_sentences=target_sentences,
teacher_model=teacher_model,
show_progress_bar=show_progress_bar,
batch_size=batch_size,
name=name,
write_csv=write_csv,
truncate_dim=truncate_dim,
)
def __call__(
self,
model: SparseEncoder,
        output_path: str | None = None,
epoch: int = -1,
steps: int = -1,
) -> dict[str, float]:
return super().__call__(model=model, output_path=output_path, epoch=epoch, steps=steps)
def embed_inputs(
self,
model: SparseEncoder,
sentences: str | list[str] | np.ndarray,
**kwargs,
) -> Tensor:
return model.encode(
sentences,
batch_size=self.batch_size,
show_progress_bar=self.show_progress_bar,
convert_to_sparse_tensor=True,
**kwargs,
)
def store_metrics_in_model_card_data(
self,
model: SparseEncoder,
metrics: dict[str, Any],
epoch: int = 0,
step: int = 0,
) -> None:
model.model_card_data.set_evaluation_metrics(self, metrics, epoch=epoch, step=step)
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.autoembeddings import ChonkieAutoEmbedding
def test_class_init() -> None:
emb = ChonkieAutoEmbedding(model_name="all-MiniLM-L6-v2")
assert isinstance(emb, BaseEmbedding)
|
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.embeddings.autoembeddings import ChonkieAutoEmbedding
def test_class_init() -> None:
emb = ChonkieAutoEmbedding(model_name="all-MiniLM-L6-v2")
assert isinstance(emb, BaseEmbedding)
|
from torchaudio.utils import ffmpeg_utils
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoFFmpeg
@skipIfNoFFmpeg
class TestFFmpegUtils(PytorchTestCase):
"""Smoke test for ffmpeg_utils module"""
def tearDown(self):
ffmpeg_utils.set_log_level(8)
super().tearDown()
def test_get_log_level(self):
"""`get_log_level` does not exhibit abnormal behavior"""
for _ in range(10):
ffmpeg_utils.get_log_level()
def test_set_log_level(self):
"""`set_log_level` persists log level"""
for i in range(-100, 100):
ffmpeg_utils.set_log_level(i)
assert ffmpeg_utils.get_log_level() == i
def test_get_version(self):
"""`get_versions` does not crash"""
versions = ffmpeg_utils.get_versions()
assert set(versions.keys()) == {"libavutil", "libavcodec", "libavformat", "libavfilter", "libavdevice"}
def test_available_stuff(self):
"""get_encoders|decoders|muxers|demuxers|devices function does not segfault"""
ffmpeg_utils.get_demuxers()
ffmpeg_utils.get_muxers()
ffmpeg_utils.get_audio_decoders()
ffmpeg_utils.get_audio_encoders()
ffmpeg_utils.get_video_decoders()
ffmpeg_utils.get_video_encoders()
ffmpeg_utils.get_input_devices()
ffmpeg_utils.get_output_devices()
ffmpeg_utils.get_input_protocols()
ffmpeg_utils.get_output_protocols()
|
from torchaudio.utils import ffmpeg_utils
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoFFmpeg
@skipIfNoFFmpeg
class TestFFmpegUtils(PytorchTestCase):
"""Smoke test for ffmpeg_utils module"""
def tearDown(self):
ffmpeg_utils.set_log_level(8)
super().tearDown()
def test_get_log_level(self):
"""`get_log_level` does not exhibit abnormal behavior"""
for _ in range(10):
ffmpeg_utils.get_log_level()
def test_set_log_level(self):
"""`set_log_level` persists log level"""
for i in range(-100, 100):
ffmpeg_utils.set_log_level(i)
assert ffmpeg_utils.get_log_level() == i
def test_get_version(self):
"""`get_versions` does not crash"""
versions = ffmpeg_utils.get_versions()
assert set(versions.keys()) == {"libavutil", "libavcodec", "libavformat", "libavfilter", "libavdevice"}
|
# mypy: allow-untyped-defs
import functools
import hashlib
import inspect
import json
import logging
import os
import time
from typing import Any, Optional
import torch._inductor.config as config
from torch._inductor.codecache import cutlass_key
from torch._inductor.codegen.cuda import cutlass_utils, serialization
from torch._inductor.codegen.cuda.cuda_env import get_cuda_arch, get_cuda_version
from torch._inductor.codegen.cuda.serialization import get_cutlass_operation_serializer
from torch._inductor.runtime.cache_dir_utils import cache_dir
from torch._inductor.utils import clear_on_fresh_cache
log = logging.getLogger(__name__)
CONFIG_PREFIX: str = "configs"
def get_config_request_key(
arch: str,
cuda_version: str,
instantiation_level: str,
) -> str:
"""
    Return a key for the full ops, based on cutlass key, arch, cuda version, instantiation level, and the serialization.py and cutlass_utils.py file hashes.
"""
# Get hash of serialization.py and cutlass_utils.py files using their module file paths
def get_file_hash(file_module):
file_path = inspect.getfile(file_module)
with open(file_path, "rb") as f:
return hashlib.sha256(f.read()).hexdigest()
serialization_hash = get_file_hash(serialization)
cutlass_utils_hash = get_file_hash(cutlass_utils)
hash_target = "-".join(
[
cutlass_key().hex(),
arch,
cuda_version,
instantiation_level,
serialization_hash,
cutlass_utils_hash,
]
)
return hashlib.sha256(hash_target.encode("utf-8")).hexdigest()[0:8]
def _generate_config_filename(request_key: str) -> str:
"""
Generate a filename for the full ops.
"""
return f"{CONFIG_PREFIX}_{request_key}.json"
@clear_on_fresh_cache
@functools.cache
def maybe_fetch_ops() -> Optional[list[Any]]:
"""
Fetch ops from databases.
"""
if config.force_disable_caches:
return None
# setup
arch: str = get_cuda_arch()
# get_cuda_version might return "12.4.0" or "12.4"
# but we want to use "12.4"
version: str = ".".join(get_cuda_version().split(".")[:2])
instantiation_level: str = config.cuda.cutlass_instantiation_level
# filename and filepath
request_key: str = get_config_request_key(arch, version, instantiation_level)
filename: str = _generate_config_filename(request_key)
filepath: str = os.path.join(cache_dir(), filename)
# try fetch
serialized_ops: Optional[list[str]] = None
start_time = time.time()
if os.path.isfile(filepath):
# locally
try:
with open(filepath) as f:
serialized_ops = json.load(f)
assert isinstance(serialized_ops, list), (
f"Expected serialized ops is a list, got {type(serialized_ops)}"
)
except Exception as e:
log.warning(
"Failed to load CUTLASS config %s from local cache: %s",
filename,
e,
)
serialized_ops = None
elif config.is_fbcode():
from torch._inductor.fb.cutlass_remote_cache import (
maybe_fetch_cutlass_configs_from_remote,
)
# from remote
serialized_ops = maybe_fetch_cutlass_configs_from_remote(filepath)
if serialized_ops is None:
return None
# deserialize
serializer = get_cutlass_operation_serializer()
full_ops = [serializer.deserialize(x) for x in serialized_ops] # type: ignore[union-attr]
log.info("Loaded ops from %s cache in %.3fs", filename, time.time() - start_time)
return full_ops
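# Hedged illustration of the cache filename scheme above (the request key is invented):
assert _generate_config_filename("a1b2c3d4") == "configs_a1b2c3d4.json"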
|
# mypy: allow-untyped-defs
import functools
import hashlib
import json
import logging
import os
import time
from typing import Any, Optional
import torch._inductor.config as config
from torch._inductor.codecache import cutlass_key
from torch._inductor.codegen.cuda.cuda_env import get_cuda_arch, get_cuda_version
from torch._inductor.codegen.cuda.serialization import get_cutlass_operation_serializer
from torch._inductor.runtime.cache_dir_utils import cache_dir
from torch._inductor.utils import clear_on_fresh_cache
log = logging.getLogger(__name__)
CONFIG_PREFIX: str = "configs"
def get_config_request_key(
arch: str,
cuda_version: str,
instantiation_level: str,
) -> str:
"""
Return a key for the full ops, based on cutlass key, arch, cuda version, and instantiation level.
"""
hash_target = "-".join(
[
cutlass_key().hex(),
arch,
cuda_version,
instantiation_level,
]
)
return hashlib.sha256(hash_target.encode("utf-8")).hexdigest()[0:8]
def _generate_config_filename(request_key: str) -> str:
"""
Generate a filename for the full ops.
"""
return f"{CONFIG_PREFIX}_{request_key}.json"
@clear_on_fresh_cache
@functools.cache
def maybe_fetch_ops() -> Optional[list[Any]]:
"""
Fetch ops from databases.
"""
if config.force_disable_caches:
return None
# setup
arch: str = get_cuda_arch()
# get_cuda_version might return "12.4.0" or "12.4"
# but we want to use "12.4"
version: str = ".".join(get_cuda_version().split(".")[:2])
instantiation_level: str = config.cuda.cutlass_instantiation_level
# filename and filepath
request_key: str = get_config_request_key(arch, version, instantiation_level)
filename: str = _generate_config_filename(request_key)
filepath: str = os.path.join(cache_dir(), filename)
# try fetch
serialized_ops: Optional[list[str]] = None
start_time = time.time()
if os.path.isfile(filepath):
# locally
try:
with open(filepath) as f:
serialized_ops = json.load(f)
                assert isinstance(serialized_ops, list), (
                    f"Expected serialized ops to be a list, got {type(serialized_ops)}"
)
except Exception as e:
log.warning(
"Failed to load CUTLASS config %s from local cache: %s",
filename,
e,
)
serialized_ops = None
elif config.is_fbcode():
from torch._inductor.fb.cutlass_remote_cache import (
maybe_fetch_cutlass_configs_from_remote,
)
# from remote
serialized_ops = maybe_fetch_cutlass_configs_from_remote(filepath)
if serialized_ops is None:
return None
# deserialize
serializer = get_cutlass_operation_serializer()
full_ops = [serializer.deserialize(x) for x in serialized_ops] # type: ignore[union-attr]
log.info("Loaded ops from %s cache in %.3fs", filename, time.time() - start_time)
return full_ops
|
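The caching scheme in the pair above boils down to: hash the identifying components, keep a short prefix of the digest, and derive a deterministic JSON filename from it. A minimal stdlib-only sketch of that idea, with made-up component values (the real key also folds in the CUTLASS key and, in the newer version, source-file hashes):

import hashlib
import json
import os

CONFIG_PREFIX = "configs"


def request_key(components: list[str]) -> str:
    # Join the identifying components and keep the first 8 hex chars of the SHA-256 digest.
    return hashlib.sha256("-".join(components).encode("utf-8")).hexdigest()[:8]


# Hypothetical values standing in for arch, CUDA version and instantiation level.
key = request_key(["90", "12.4", "3332"])
filename = f"{CONFIG_PREFIX}_{key}.json"
filepath = os.path.join("/tmp/example_cache", filename)

# The cached payload is a JSON list of serialized ops; loading mirrors maybe_fetch_ops.
if os.path.isfile(filepath):
    with open(filepath) as f:
        serialized_ops = json.load(f)
        assert isinstance(serialized_ops, list)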
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.21.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
list_datasets,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.20.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
list_datasets,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# isort: split
# Deprecated modules
from . import arrow_dataset as _arrow_dataset
from . import utils as _utils
from .exceptions import ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
from .utils import download_manager as _deprecated_download_manager
from .utils import info_utils as _deprecated_info_utils
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
_deprecated_info_utils.ExpectedMoreDownloadedFiles = ExpectedMoreDownloadedFiles
_deprecated_info_utils.ExpectedMoreSplits = ExpectedMoreSplits
_deprecated_info_utils.UnexpectedDownloadedFile = UnexpectedDownloadedFile
_deprecated_info_utils.UnexpectedSplits = UnexpectedSplits
del _arrow_dataset, _utils, _deprecated_download_manager
del _deprecated_info_utils, ExpectedMoreDownloadedFiles, ExpectedMoreSplits, UnexpectedDownloadedFile, UnexpectedSplits
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import packaging.version
REPLACE_PATTERNS = {
"init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
"setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
}
REPLACE_FILES = {
"init": "src/datasets/__init__.py",
"setup": "setup.py",
}
def update_version_in_file(fname, version, pattern):
"""Update the version in one file using a specific pattern."""
with open(fname, "r", encoding="utf-8", newline="\n") as f:
code = f.read()
re_pattern, replace = REPLACE_PATTERNS[pattern]
replace = replace.replace("VERSION", version)
code = re_pattern.sub(replace, code)
with open(fname, "w", encoding="utf-8", newline="\n") as f:
f.write(code)
def global_version_update(version):
"""Update the version in all needed files."""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname, version, pattern)
def get_version():
"""Reads the current version in the __init__."""
with open(REPLACE_FILES["init"], "r") as f:
code = f.read()
default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
return packaging.version.parse(default_version)
def pre_release_work(patch=False):
"""Do all the necessary pre-release steps."""
# First let's get the default version: base version if we are in dev, bump minor otherwise.
default_version = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
if default_version.is_devrelease:
default_version = default_version.base_version
elif patch:
default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
default_version = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
version = input(f"Which version are you releasing? [{default_version}]")
if len(version) == 0:
version = default_version
print(f"Updating version to {version}.")
global_version_update(version)
def post_release_work():
    """Do all the necessary post-release steps."""
# First let's get the current version
current_version = get_version()
dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
current_version = current_version.base_version
# Check with the user we got that right.
version = input(f"Which version are we developing now? [{dev_version}]")
if len(version) == 0:
version = dev_version
print(f"Updating version to {version}.")
global_version_update(version)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether or not this is post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version):
"""Update the version table in the custom.js file."""
with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
lines = f.readlines()
index = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion ="):
index += 1
lines[index] = f'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {"):
index += 1
# We go until the end
while not lines[index].startswith("}"):
index += 1
# We add the new version at the end
lines[index - 1] += f' "v{version}": "v{version}",\n'
with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
args = parser.parse_args()
update_custom_js(args.version)
|
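Both release helpers above reduce to a regular-expression substitution per target file. A small self-contained sketch of the `init` pattern in action, on an invented sample string:

import re

# Same pattern/template shape as REPLACE_PATTERNS["init"].
init_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'

sample = 'import os\n__version__ = "2.21.1.dev0"\n'
updated = init_pattern.sub(template.replace("VERSION", "2.21.1"), sample)
print(updated)  # the version line now reads: __version__ = "2.21.1"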
"""
This example loads a pre-trained model and evaluates it on the STSbenchmark dataset.
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
import logging
import os
import sys
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
script_folder_path = os.path.dirname(os.path.realpath(__file__))
# Limit torch to 4 threads
torch.set_num_threads(4)
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "stsb-distilroberta-base-v2"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
model.evaluate(dev_evaluator)
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
model.evaluate(test_evaluator)
|
"""
This example loads a pre-trained model and evaluates it on the STSbenchmark dataset.
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from datasets import load_dataset
import logging
import sys
import torch
import os
from sentence_transformers.similarity_functions import SimilarityFunction
script_folder_path = os.path.dirname(os.path.realpath(__file__))
# Limit torch to 4 threads
torch.set_num_threads(4)
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "stsb-distilroberta-base-v2"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
model.evaluate(dev_evaluator)
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
model.evaluate(test_evaluator)
|
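The EmbeddingSimilarityEvaluator used above reports, among other things, the Spearman correlation between the embedding similarities and the gold STS scores. A rough standalone sketch of that measurement with plain numpy/scipy (the model and dataset are the same ones the script loads):

import numpy as np
from datasets import load_dataset
from scipy.stats import spearmanr
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("stsb-distilroberta-base-v2")
data = load_dataset("sentence-transformers/stsb", split="test")

emb1 = model.encode(data["sentence1"], convert_to_numpy=True, normalize_embeddings=True)
emb2 = model.encode(data["sentence2"], convert_to_numpy=True, normalize_embeddings=True)

# With normalized embeddings, the row-wise dot product equals the cosine similarity.
cosine_scores = np.sum(emb1 * emb2, axis=1)
spearman, _ = spearmanr(cosine_scores, data["score"])
print(f"Spearman correlation (cosine) on sts-test: {spearman:.4f}")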
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, StorageReader, StorageWriter]:
if storage:
if checkpoint_id is not None:
storage.reset(checkpoint_id)
return storage
if not checkpoint_id:
raise RuntimeError(
"`checkpoint_id` must be specified if "
"storage_reader/storage_writer is None."
)
targets: list[type[Union[StorageReader, StorageWriter]]] = []
if reader:
targets = [
FileSystemReader,
]
else:
targets = [
FileSystemWriter,
]
try:
from ._fsspec_filesystem import FsspecReader, FsspecWriter
targets.append(FsspecReader if reader else FsspecWriter)
except Exception:
pass
for target in targets:
if target.validate_checkpoint_id(checkpoint_id):
storage = target(checkpoint_id) # type: ignore[call-arg]
storage.reset(checkpoint_id)
return storage
raise RuntimeError(
"Cannot detect which StorageReader or StorageWriter to use. "
"Please specify the storage_reader/storage_writer."
)
|
import os
from typing import Union
from .filesystem import FileSystemReader, FileSystemWriter
from .storage import StorageReader, StorageWriter
def _storage_setup(
storage: Union[StorageReader, StorageWriter, None],
checkpoint_id: Union[str, os.PathLike, None],
reader: bool = False,
) -> Union[None, StorageReader, StorageWriter]:
if storage:
if checkpoint_id is not None:
storage.reset(checkpoint_id)
return storage
if not checkpoint_id:
        raise RuntimeError(
            "`checkpoint_id` must be specified if "
            "storage_reader/storage_writer is None."
)
targets: list[type[Union[StorageReader, StorageWriter]]] = []
if reader:
targets = [
FileSystemReader,
]
else:
targets = [
FileSystemWriter,
]
try:
from ._fsspec_filesystem import FsspecReader, FsspecWriter
targets.append(FsspecReader if reader else FsspecWriter)
except Exception:
pass
for target in targets:
if target.validate_checkpoint_id(checkpoint_id):
storage = target(checkpoint_id) # type: ignore[call-arg]
storage.reset(checkpoint_id)
return storage
raise RuntimeError(
"Cannot detect which StorageReader or StorageWriter to use. "
"Please specify the storage_reader/storage_writer."
)
|
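The selection logic in `_storage_setup` is a simple capability probe: ask each candidate class whether it recognises the `checkpoint_id` and instantiate the first one that does. A generic, library-free sketch of that pattern with two invented toy backends:

class LocalBackend:
    @classmethod
    def validate_checkpoint_id(cls, checkpoint_id: str) -> bool:
        return not checkpoint_id.startswith(("s3://", "gs://"))

    def __init__(self, checkpoint_id: str) -> None:
        self.root = checkpoint_id


class ObjectStoreBackend:
    @classmethod
    def validate_checkpoint_id(cls, checkpoint_id: str) -> bool:
        return checkpoint_id.startswith(("s3://", "gs://"))

    def __init__(self, checkpoint_id: str) -> None:
        self.uri = checkpoint_id


def pick_backend(checkpoint_id: str):
    # Mirrors _storage_setup: the first backend whose validate_checkpoint_id accepts the id wins.
    for target in (LocalBackend, ObjectStoreBackend):
        if target.validate_checkpoint_id(checkpoint_id):
            return target(checkpoint_id)
    raise RuntimeError(f"Cannot detect which backend to use for {checkpoint_id!r}")


print(type(pick_backend("/tmp/ckpt")).__name__)         # LocalBackend
print(type(pick_backend("s3://bucket/ckpt")).__name__)  # ObjectStoreBackend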
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
YOLOAnchorGenerator)
from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
build_anchor_generator, build_prior_generator)
from .point_generator import MlvlPointGenerator, PointGenerator
from .utils import anchor_inside_flags, calc_region, images_to_levels
__all__ = [
'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
'PointGenerator', 'images_to_levels', 'calc_region',
'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator',
'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator'
]
|
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
YOLOAnchorGenerator)
from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
build_anchor_generator, build_prior_generator)
from .point_generator import MlvlPointGenerator, PointGenerator
from .utils import anchor_inside_flags, calc_region, images_to_levels
__all__ = [
'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
'PointGenerator', 'images_to_levels', 'calc_region',
'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator',
'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator'
]
|
import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: str) -> None:
check_call(
[
"git",
"clone",
"--depth",
"1",
"--branch",
tag,
repo,
],
cwd=third_party_path,
)
def read_nccl_pin() -> str:
nccl_file = "nccl-cu12.txt"
if os.getenv("DESIRED_CUDA", os.getenv("CUDA_VERSION", "")).startswith("11"):
nccl_file = "nccl-cu11.txt"
nccl_pin_path = repo_root / ".ci" / "docker" / "ci_commit_pins" / nccl_file
return _read_file(nccl_pin_path)
def checkout_nccl() -> None:
release_tag = read_nccl_pin()
print(f"-- Checkout nccl release tag: {release_tag}")
nccl_basedir = third_party_path / "nccl"
if not nccl_basedir.exists():
_checkout_by_tag("https://github.com/NVIDIA/nccl", release_tag)
if __name__ == "__main__":
checkout_nccl()
|
import os
from pathlib import Path
from subprocess import check_call
repo_root = Path(__file__).absolute().parent.parent
third_party_path = repo_root / "third_party"
def _read_file(path: Path) -> str:
with path.open(encoding="utf-8") as f:
return f.read().strip()
def _checkout_by_tag(repo: str, tag: str) -> None:
check_call(
[
"git",
"clone",
"--depth",
"1",
"--branch",
tag,
repo,
],
cwd=third_party_path,
)
def read_nccl_pin() -> str:
nccl_file = "nccl-cu12.txt"
if os.getenv("DESIRED_CUDA", os.getenv("CUDA_VERSION", "")).startswith("11"):
nccl_file = "nccl-cu11.txt"
nccl_pin_path = repo_root / ".ci" / "docker" / "ci_commit_pins" / nccl_file
return _read_file(nccl_pin_path)
def checkout_nccl() -> None:
release_tag = read_nccl_pin()
print(f"-- Checkout nccl release tag: {release_tag}")
nccl_basedir = third_party_path / "nccl"
if not nccl_basedir.exists():
_checkout_by_tag("https://github.com/NVIDIA/nccl", release_tag)
def checkout_eigen() -> None:
eigen_tag = _read_file(third_party_path / "eigen_pin.txt")
print(f"-- Checkout Eigen release tag: {eigen_tag}")
eigen_basedir = third_party_path / "eigen"
if not eigen_basedir.exists():
_checkout_by_tag("https://gitlab.com/libeigen/eigen", eigen_tag)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
        # If no arguments are given, check out all optional dependencies
checkout_nccl()
checkout_eigen()
else:
# Otherwise just call top-level function of choice
globals()[sys.argv[1]]()
|
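The second version of the script above also adds a tiny dispatch-by-name CLI: with no arguments every checkout runs, otherwise `globals()[sys.argv[1]]()` invokes whichever top-level function was named. A self-contained sketch of that pattern with invented function names:

import sys


def checkout_foo() -> None:
    print("checking out foo")


def checkout_bar() -> None:
    print("checking out bar")


if __name__ == "__main__":
    if len(sys.argv) == 1:
        # No arguments: run every step.
        checkout_foo()
        checkout_bar()
    else:
        # e.g. `python script.py checkout_bar` runs only that function.
        globals()[sys.argv[1]]()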
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demonstrated in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
  and RANSAC because it does not attempt to completely filter the outliers
  but only to lessen their effect.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import (
HuberRegressor,
LinearRegression,
RANSACRegressor,
TheilSenRegressor,
)
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [
("OLS", LinearRegression()),
("Theil-Sen", TheilSenRegressor(random_state=42)),
("RANSAC", RANSACRegressor(random_state=42)),
("HuberRegressor", HuberRegressor()),
]
colors = {
"OLS": "turquoise",
"Theil-Sen": "gold",
"RANSAC": "lightgreen",
"HuberRegressor": "black",
}
linestyle = {"OLS": "-", "Theil-Sen": "-.", "RANSAC": "--", "HuberRegressor": "--"}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
("Modeling Errors Only", X, y),
("Corrupt X, Small Deviants", X_errors, y),
("Corrupt y, Small Deviants", X, y_errors),
("Corrupt X, Large Deviants", X_errors_large, y),
("Corrupt y, Large Deviants", X, y_errors_large),
]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, "b+")
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(
x_plot,
y_plot,
color=colors[name],
linestyle=linestyle[name],
linewidth=lw,
label="%s: error = %.3f" % (name, mse),
)
legend_title = "Error of Mean\nAbsolute Deviation\nto Non-corrupt Data"
legend = plt.legend(
loc="upper right", frameon=False, title=legend_title, prop=dict(size="x-small")
)
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
"""
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
- The scores of HuberRegressor may not be compared directly to both TheilSen
  and RANSAC because it does not attempt to completely filter the outliers
  but only to lessen their effect.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import (
HuberRegressor,
LinearRegression,
RANSACRegressor,
TheilSenRegressor,
)
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [
("OLS", LinearRegression()),
("Theil-Sen", TheilSenRegressor(random_state=42)),
("RANSAC", RANSACRegressor(random_state=42)),
("HuberRegressor", HuberRegressor()),
]
colors = {
"OLS": "turquoise",
"Theil-Sen": "gold",
"RANSAC": "lightgreen",
"HuberRegressor": "black",
}
linestyle = {"OLS": "-", "Theil-Sen": "-.", "RANSAC": "--", "HuberRegressor": "--"}
lw = 3
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
("Modeling Errors Only", X, y),
("Corrupt X, Small Deviants", X_errors, y),
("Corrupt y, Small Deviants", X, y_errors),
("Corrupt X, Large Deviants", X_errors_large, y),
("Corrupt y, Large Deviants", X, y_errors_large),
]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, "b+")
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(
x_plot,
y_plot,
color=colors[name],
linestyle=linestyle[name],
linewidth=lw,
label="%s: error = %.3f" % (name, mse),
)
legend_title = "Error of Mean\nAbsolute Deviation\nto Non-corrupt Data"
legend = plt.legend(
loc="upper right", frameon=False, title=legend_title, prop=dict(size="x-small")
)
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
|
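The docstring above judges robustness by the median absolute deviation to non-corrupt data, while the plotted labels use the mean squared error on the clean test set. For reference, the MAD criterion itself is a one-liner; a short sketch with the same kind of synthetic data:

import numpy as np
from sklearn.linear_model import HuberRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.RandomState(42)
X = rng.normal(size=400)[:, np.newaxis]
y = np.sin(X).ravel()
X_test = rng.normal(size=200)[:, np.newaxis]
y_test = np.sin(X_test).ravel()

model = make_pipeline(PolynomialFeatures(3), HuberRegressor()).fit(X, y)

# Median absolute deviation of the predictions from the clean targets,
# i.e. the robustness criterion the docstring refers to.
mad = np.median(np.abs(model.predict(X_test) - y_test))
print(f"MAD to non-corrupt data: {mad:.3f}")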
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization, e.g. ``task``.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
|
from __future__ import annotations
from abc import abstractmethod
from typing import Any
import torch
from tokenizers import Tokenizer
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from sentence_transformers.models.Module import Module
class InputModule(Module):
"""
Subclass of :class:`sentence_transformers.models.Module`, base class for all input modules in the Sentence
Transformers library, i.e. modules that are used to process inputs and optionally also perform processing
in the forward pass.
This class provides a common interface for all input modules, including methods for loading and saving the module's
configuration and weights, as well as input processing. It also provides a method for performing the forward pass
of the module.
Three abstract methods are defined in this class, which must be implemented by subclasses:
- :meth:`sentence_transformers.models.Module.forward`: The forward pass of the module.
- :meth:`sentence_transformers.models.Module.save`: Save the module to disk.
- :meth:`sentence_transformers.models.InputModule.tokenize`: Tokenize the input texts and return a dictionary of tokenized features.
Optionally, you may also have to override:
- :meth:`sentence_transformers.models.Module.load`: Load the module from disk.
To assist with loading and saving the module, several utility methods are provided:
- :meth:`sentence_transformers.models.Module.load_config`: Load the module's configuration from a JSON file.
- :meth:`sentence_transformers.models.Module.load_file_path`: Load a file from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_dir_path`: Load a directory from the module's directory, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.load_torch_weights`: Load the PyTorch weights of the module, regardless of whether the module is saved locally or on Hugging Face.
- :meth:`sentence_transformers.models.Module.save_config`: Save the module's configuration to a JSON file.
- :meth:`sentence_transformers.models.Module.save_torch_weights`: Save the PyTorch weights of the module.
- :meth:`sentence_transformers.models.InputModule.save_tokenizer`: Save the tokenizer used by the module.
- :meth:`sentence_transformers.models.Module.get_config_dict`: Get the module's configuration as a dictionary.
And several class variables are defined to assist with loading and saving the module:
- :attr:`sentence_transformers.models.Module.config_file_name`: The name of the configuration file used to save the module's configuration.
- :attr:`sentence_transformers.models.Module.config_keys`: A list of keys used to save the module's configuration.
- :attr:`sentence_transformers.models.InputModule.save_in_root`: Whether to save the module's configuration in the root directory of the model or in a subdirectory named after the module.
- :attr:`sentence_transformers.models.InputModule.tokenizer`: The tokenizer used by the module.
"""
save_in_root: bool = True
tokenizer: PreTrainedTokenizerBase | Tokenizer
"""
The tokenizer used for tokenizing the input texts. It can be either a
:class:`transformers.PreTrainedTokenizerBase` subclass or a Tokenizer from the
``tokenizers`` library.
"""
@abstractmethod
def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor | Any]:
"""
Tokenizes the input texts and returns a dictionary of tokenized features.
Args:
texts (list[str]): List of input texts to tokenize.
**kwargs: Additional keyword arguments for tokenization.
Returns:
dict[str, torch.Tensor | Any]: Dictionary containing tokenized features, e.g.
``{"input_ids": ..., "attention_mask": ...}``
"""
def save_tokenizer(self, output_path: str, **kwargs) -> None:
"""
Saves the tokenizer to the specified output path.
Args:
output_path (str): Path to save the tokenizer.
**kwargs: Additional keyword arguments for saving the tokenizer.
Returns:
None
"""
if not hasattr(self, "tokenizer"):
return
if isinstance(self.tokenizer, PreTrainedTokenizerBase):
self.tokenizer.save_pretrained(output_path, **kwargs)
elif isinstance(self.tokenizer, Tokenizer):
self.tokenizer.save(output_path, **kwargs)
return
|
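To make the contract above concrete, here is a minimal hypothetical sketch of a custom input module. It is not a module from the library; it only illustrates the three documented responsibilities (tokenize, forward, save) plus tokenizer-style state. The whitespace vocabulary, the `sentence_embedding` output key, and all names are assumptions made for the example.

# Hypothetical toy module; signatures follow the docstring above, not a specific
# implementation from the library.
import json
import os

import torch


class WhitespaceEmbeddingModule(torch.nn.Module):
    save_in_root: bool = True

    def __init__(self, vocab: dict[str, int], dim: int = 32) -> None:
        super().__init__()
        self.vocab = vocab
        self.embedding = torch.nn.Embedding(len(vocab) + 1, dim)  # +1 for an <unk> slot at index 0

    def tokenize(self, texts: list[str], **kwargs) -> dict[str, torch.Tensor]:
        ids = [[self.vocab.get(tok, 0) for tok in text.split()] for text in texts]
        max_len = max(len(seq) for seq in ids)
        input_ids = torch.zeros(len(ids), max_len, dtype=torch.long)
        attention_mask = torch.zeros_like(input_ids)
        for i, seq in enumerate(ids):
            input_ids[i, : len(seq)] = torch.tensor(seq)
            attention_mask[i, : len(seq)] = 1
        return {"input_ids": input_ids, "attention_mask": attention_mask}

    def forward(self, features: dict) -> dict:
        # Mean-pool token embeddings into one vector per input text.
        emb = self.embedding(features["input_ids"])
        mask = features["attention_mask"].unsqueeze(-1)
        features["sentence_embedding"] = (emb * mask).sum(1) / mask.sum(1).clamp(min=1)
        return features

    def save(self, output_path: str, **kwargs) -> None:
        with open(os.path.join(output_path, "vocab.json"), "w") as f:
            json.dump(self.vocab, f)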
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [
dict(type='BatchFixedSizePad', size=image_size, pad_mask=True)
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
# use caffe norm
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../common/lsj_100e_coco_instance.py'
]
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed
# Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
# pad_size_divisor=32 is unnecessary in training but necessary
# in testing.
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
frozen_stages=-1,
norm_eval=False,
norm_cfg=norm_cfg,
init_cfg=None,
style='caffe'),
neck=dict(norm_cfg=norm_cfg),
rpn_head=dict(num_convs=2),
roi_head=dict(
bbox_head=dict(
type='Shared4Conv1FCBBoxHead',
conv_out_channels=256,
norm_cfg=head_norm_cfg),
mask_head=dict(norm_cfg=head_norm_cfg)))
file_client_args = dict(backend='disk')
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(dataset=dict(dataset=dict(pipeline=train_pipeline)))
|
import pytest
from xgboost import testing as tm
from xgboost.testing.interaction_constraints import (
run_interaction_constraints,
training_accuracy,
)
class TestInteractionConstraints:
def test_exact_interaction_constraints(self) -> None:
run_interaction_constraints(tree_method="exact", device="cpu")
def test_hist_interaction_constraints(self) -> None:
run_interaction_constraints(tree_method="hist", device="cpu")
def test_approx_interaction_constraints(self) -> None:
run_interaction_constraints(tree_method="approx", device="cpu")
def test_interaction_constraints_feature_names(self) -> None:
with pytest.raises(ValueError):
constraints = [("feature_0", "feature_1")]
run_interaction_constraints(
tree_method="exact", device="cpu", interaction_constraints=constraints
)
with pytest.raises(ValueError):
constraints = [("feature_0", "feature_3")]
feature_names = ["feature_0", "feature_1", "feature_2"]
run_interaction_constraints(
tree_method="exact",
device="cpu",
feature_names=feature_names,
interaction_constraints=constraints,
)
constraints = [("feature_0", "feature_1")]
feature_names = ["feature_0", "feature_1", "feature_2"]
run_interaction_constraints(
tree_method="exact",
device="cpu",
feature_names=feature_names,
interaction_constraints=constraints,
)
constraints_lst = [["feature_0", "feature_1"], ["feature_2"]]
feature_names = ["feature_0", "feature_1", "feature_2"]
run_interaction_constraints(
tree_method="exact",
device="cpu",
feature_names=feature_names,
interaction_constraints=constraints_lst,
)
@pytest.mark.skipif(**tm.no_sklearn())
@pytest.mark.parametrize("tree_method", ["hist", "approx", "exact"])
def test_hist_training_accuracy(self, tree_method: str) -> None:
dpath = "demo/data/"
training_accuracy(tree_method=tree_method, dpath=dpath, device="cpu")
|
import numpy as np
import pytest
import xgboost
from xgboost import testing as tm
dpath = 'demo/data/'
rng = np.random.RandomState(1994)
class TestInteractionConstraints:
def run_interaction_constraints(
self, tree_method, feature_names=None, interaction_constraints='[[0, 1]]'
):
x1 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x2 = np.random.normal(loc=1.0, scale=1.0, size=1000)
x3 = np.random.choice([1, 2, 3], size=1000, replace=True)
y = x1 + x2 + x3 + x1 * x2 * x3 \
+ np.random.normal(
loc=0.001, scale=1.0, size=1000) + 3 * np.sin(x1)
X = np.column_stack((x1, x2, x3))
dtrain = xgboost.DMatrix(X, label=y, feature_names=feature_names)
params = {
'max_depth': 3,
'eta': 0.1,
'nthread': 2,
'interaction_constraints': interaction_constraints,
'tree_method': tree_method
}
num_boost_round = 12
# Fit a model that only allows interaction between x1 and x2
bst = xgboost.train(
params, dtrain, num_boost_round, evals=[(dtrain, 'train')])
# Set all observations to have the same x3 values then increment
# by the same amount
def f(x):
tmat = xgboost.DMatrix(
np.column_stack((x1, x2, np.repeat(x, 1000))), feature_names=feature_names)
return bst.predict(tmat)
preds = [f(x) for x in [1, 2, 3]]
# Check incrementing x3 has the same effect on all observations
# since x3 is constrained to be independent of x1 and x2
# and all observations start off from the same x3 value
diff1 = preds[1] - preds[0]
assert np.all(np.abs(diff1 - diff1[0]) < 1e-4)
diff2 = preds[2] - preds[1]
assert np.all(np.abs(diff2 - diff2[0]) < 1e-4)
def test_exact_interaction_constraints(self):
self.run_interaction_constraints(tree_method='exact')
def test_hist_interaction_constraints(self):
self.run_interaction_constraints(tree_method='hist')
def test_approx_interaction_constraints(self):
self.run_interaction_constraints(tree_method='approx')
def test_interaction_constraints_feature_names(self):
with pytest.raises(ValueError):
constraints = [('feature_0', 'feature_1')]
self.run_interaction_constraints(tree_method='exact',
interaction_constraints=constraints)
with pytest.raises(ValueError):
constraints = [('feature_0', 'feature_3')]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
constraints = [('feature_0', 'feature_1')]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
constraints = [['feature_0', 'feature_1'], ['feature_2']]
feature_names = ['feature_0', 'feature_1', 'feature_2']
self.run_interaction_constraints(tree_method='exact',
feature_names=feature_names,
interaction_constraints=constraints)
@pytest.mark.skipif(**tm.no_sklearn())
def training_accuracy(self, tree_method):
"""Test accuracy, reused by GPU tests."""
from sklearn.metrics import accuracy_score
dtrain = xgboost.DMatrix(
dpath + "agaricus.txt.train?indexing_mode=1&format=libsvm"
)
dtest = xgboost.DMatrix(
dpath + "agaricus.txt.test?indexing_mode=1&format=libsvm"
)
params = {
'eta': 1,
'max_depth': 6,
'objective': 'binary:logistic',
'tree_method': tree_method,
'interaction_constraints': '[[1,2], [2,3,4]]'
}
num_boost_round = 5
params['grow_policy'] = 'lossguide'
bst = xgboost.train(params, dtrain, num_boost_round)
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
params['grow_policy'] = 'depthwise'
bst = xgboost.train(params, dtrain, num_boost_round)
pred_dtest = (bst.predict(dtest) < 0.5)
assert accuracy_score(dtest.get_label(), pred_dtest) < 0.1
@pytest.mark.parametrize("tree_method", ["hist", "approx", "exact"])
def test_hist_training_accuracy(self, tree_method):
self.training_accuracy(tree_method=tree_method)
|
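Both versions of the test exercise the same `interaction_constraints` parameter, which the code above passes either as a JSON-style string of column indices or as lists of feature names once the DMatrix carries `feature_names`. A short sketch of the two equivalent forms on synthetic data (values chosen only for illustration):

import numpy as np
import xgboost

rng = np.random.RandomState(1994)
X = rng.normal(size=(512, 3))
y = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1] + rng.normal(scale=0.1, size=512)

# Index-based constraints: features 0 and 1 may interact, feature 2 stays on its own.
dtrain = xgboost.DMatrix(X, label=y)
bst = xgboost.train(
    {"tree_method": "hist", "interaction_constraints": "[[0, 1]]"},
    dtrain,
    num_boost_round=10,
)

# Name-based constraints: the same restriction expressed via feature_names.
dtrain_named = xgboost.DMatrix(X, label=y, feature_names=["f0", "f1", "f2"])
bst_named = xgboost.train(
    {"tree_method": "hist", "interaction_constraints": [["f0", "f1"], ["f2"]]},
    dtrain_named,
    num_boost_round=10,
)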
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
from __future__ import annotations
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ExtractTextToolInput(BaseModel):
"""Explicit no-args input for ExtractTextTool."""
class ExtractTextTool(BaseBrowserTool):
"""Tool for extracting all the text on the current webpage."""
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = ExtractTextToolInput
@model_validator(mode="before")
@classmethod
def check_acheck_bs_importrgs(cls, values: dict) -> Any:
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
|
from __future__ import annotations
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ExtractTextToolInput(BaseModel):
"""Explicit no-args input for ExtractTextTool."""
class ExtractTextTool(BaseBrowserTool): # type: ignore[override, override]
"""Tool for extracting all the text on the current webpage."""
name: str = "extract_text"
description: str = "Extract all the text on the current webpage"
args_schema: Type[BaseModel] = ExtractTextToolInput
@model_validator(mode="before")
@classmethod
def check_acheck_bs_importrgs(cls, values: dict) -> Any:
"""Check that the arguments are valid."""
try:
from bs4 import BeautifulSoup # noqa: F401
except ImportError:
raise ImportError(
"The 'beautifulsoup4' package is required to use this tool."
" Please install it with 'pip install beautifulsoup4'."
)
return values
def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
"""Use the tool."""
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
if self.sync_browser is None:
raise ValueError(f"Synchronous browser not provided to {self.name}")
page = get_current_page(self.sync_browser)
html_content = page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
async def _arun(
self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
if self.async_browser is None:
raise ValueError(f"Asynchronous browser not provided to {self.name}")
# Use Beautiful Soup since it's faster than looping through the elements
from bs4 import BeautifulSoup
page = await aget_current_page(self.async_browser)
html_content = await page.content()
# Parse the HTML content with BeautifulSoup
soup = BeautifulSoup(html_content, "lxml")
return " ".join(text for text in soup.stripped_strings)
|
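The actual text extraction in `_run`/`_arun` is independent of the browser plumbing: parse the page HTML once and join BeautifulSoup's `stripped_strings`. A standalone sketch of just that step, with a made-up HTML snippet:

from bs4 import BeautifulSoup

html_content = """
<html>
  <body>
    <h1>Title</h1>
    <p>First paragraph.</p>
    <p>Second paragraph.</p>
  </body>
</html>
"""

# Same approach as the tool: the "lxml" parser requires the lxml package;
# the stdlib "html.parser" also works for a quick test.
soup = BeautifulSoup(html_content, "lxml")
text = " ".join(soup.stripped_strings)
print(text)  # Title First paragraph. Second paragraph.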
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.3.1"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
|