input (stringlengths 33-5k) | output (stringlengths 32-5k)
---|---
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmcv.transforms import Compose
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s); 0 blocks until a key press')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# build test pipeline
model.cfg.test_dataloader.dataset.pipeline[0].type = 'LoadImageFromNDArray'
test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then passed to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in mmcv.track_iter_progress(video_reader):
result = inference_detector(model, frame, test_pipeline=test_pipeline)
visualizer.add_datasample(
name='video',
image=frame,
data_sample=result,
show=False,
pred_score_thr=args.score_thr)
frame = visualizer.get_image()
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
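# A hedged usage sketch: running this demo from the command line. The config
# and checkpoint paths below are hypothetical placeholders, not files shipped
# with this script.
#
#   python video_demo.py demo.mp4 \
#       configs/rtmdet/rtmdet_tiny_8xb32-300e_coco.py rtmdet_tiny.pth \
#       --out result.mp4 --score-thr 0.3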
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import mmcv
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection video demo')
parser.add_argument('video', help='Video file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument('--out', type=str, help='Output video file')
parser.add_argument('--show', action='store_true', help='Show video')
parser.add_argument(
'--wait-time',
type=float,
default=1,
help='The interval of show (s); 0 blocks until a key press')
args = parser.parse_args()
return args
def main():
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save/show the '
'video) with the argument "--out" or "--show"')
model = init_detector(args.config, args.checkpoint, device=args.device)
video_reader = mmcv.VideoReader(args.video)
video_writer = None
if args.out:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(
args.out, fourcc, video_reader.fps,
(video_reader.width, video_reader.height))
for frame in mmcv.track_iter_progress(video_reader):
result = inference_detector(model, frame)
frame = model.show_result(frame, result, score_thr=args.score_thr)
if args.show:
cv2.namedWindow('video', 0)
mmcv.imshow(frame, 'video', args.wait_time)
if args.out:
video_writer.write(frame)
if video_writer:
video_writer.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
from __future__ import annotations
__version__ = "3.5.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder.CrossEncoder import CrossEncoder
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
]
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .videofolder import videofolder
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder", "videofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
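# Illustrative lookups, derived only from the tables defined above, showing how
# a loader module is inferred from a file extension at import time:
#
#   _EXTENSION_TO_MODULE[".tsv"]   -> ("csv", {"sep": "\t"})
#   _MODULE_TO_EXTENSIONS["json"]  -> [".json", ".jsonl", ".ndjson", ".zip"]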
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.docstore.base import AddableMixin, Docstore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Docstore": "langchain_community.docstore.base",
"AddableMixin": "langchain_community.docstore.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AddableMixin",
"Docstore",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.docstore.base import AddableMixin, Docstore
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"Docstore": "langchain_community.docstore.base",
"AddableMixin": "langchain_community.docstore.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"Docstore",
"AddableMixin",
]
|
from typing import Dict, List, Optional
from docarray import DocArray
def reduce(
left: DocArray, right: DocArray, left_id_map: Optional[Dict] = None
) -> 'DocArray':
"""
Reduces left and right DocArray into one DocArray in-place.
Changes are applied to the left DocArray.
Reducing two DocArrays consists of adding the Documents of the second
DocArray to the first DocArray if they do not already exist.
If a Document exists in both DocArrays (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocArrays are also reduced in the same way.
:param left: First DocArray to be reduced. Changes will be applied to it
in-place
:param right: Second DocArray to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocArray
:return: Reduced DocArray
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docarrays: List[DocArray]) -> DocArray:
"""
Reduces a list of DocArrays into one DocArray.
Changes are applied to the first DocArray in-place.
The resulting DocArray contains Documents of all DocArrays.
If a Document exists (identified by its ID) in many DocArrays,
data properties are merged with priority to the left-most
DocArrays (that is, if a data attribute is set in a Document
belonging to many DocArrays, the attribute value of the left-most
DocArray is kept).
Nested DocArrays belonging to many DocArrays
are also reduced in the same way.
.. note::
- Nested DocArrays order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocArrays
when applying reduction.
:param docarrays: List of DocArrays to be reduced
:return: the resulting DocArray
"""
if len(docarrays) <= 1:
raise Exception(
'In order to reduce DocArrays' ' we should have more than one DocArray'
)
left = docarrays[0]
others = docarrays[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for da in others:
reduce(left, da, left_id_map)
return left
|
from docarray import DocumentArray
from typing import List, Optional, Dict
def reduce(
left: DocumentArray, right: DocumentArray, left_id_map: Optional[Dict] = None
) -> 'DocumentArray':
"""
Reduces left and right DocumentArray into one DocumentArray in-place.
Changes are applied to the left DocumentArray.
Reducing two DocumentArrays consists of adding the Documents of the second
DocumentArray to the first DocumentArray if they do not already exist.
If a Document exists in both DocumentArrays (identified by ID),
the data properties are merged with priority to the left Document.
Nested DocumentArrays are also reduced in the same way.
:param left: First DocumentArray to be reduced. Changes will be applied to it
in-place
:param right: Second DocumentArray to be reduced
:param left_id_map: Optional parameter to be passed in repeated calls
for optimizations, keeping a map of the Document ID to its offset
in the DocumentArray
:return: Reduced DocumentArray
"""
left_id_map = left_id_map or {doc.id: i for i, doc in enumerate(left)}
for doc in right:
if doc.id in left_id_map:
left[left_id_map[doc.id]].update(doc)
else:
left.append(doc)
return left
def reduce_all(docarrays: List[DocumentArray]) -> DocumentArray:
"""
Reduces a list of DocumentArrays into one DocumentArray.
Changes are applied to the first DocumentArray in-place.
The resulting DocumentArray contains Documents of all DocumentArrays.
If a Document exists (identified by its ID) in many DocumentArrays,
data properties are merged with priority to the left-most
DocumentArrays (that is, if a data attribute is set in a Document
belonging to many DocumentArrays, the attribute value of the left-most
DocumentArray is kept).
Nested DocumentArrays belonging to many DocumentArrays
are also reduced in the same way.
.. note::
- Nested DocumentArrays order does not follow any specific rule.
You might want to re-sort them in a later step.
- The final result depends on the order of DocumentArrays
when applying reduction.
:param docarrays: List of DocumentArrays to be reduced
:return: the resulting DocumentArray
"""
if len(docarrays) <= 1:
raise Exception(
'In order to reduce DocumentArrays'
' we should have more than one DocumentArray'
)
left = docarrays[0]
others = docarrays[1:]
left_id_map = {doc.id: i for i, doc in enumerate(left)}
for da in others:
reduce(left, da, left_id_map)
return left
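# A hedged usage sketch (docarray v1-style Document/DocumentArray API assumed):
#
#   from docarray import Document, DocumentArray
#   left = DocumentArray([Document(id='a', text='hello'), Document(id='b')])
#   right = DocumentArray([Document(id='b', text='world'), Document(id='c')])
#   merged = reduce_all([left, right])
#   # 'a' and 'c' are kept as-is; the two 'b' Documents are merged in-place,
#   # with the left Document's populated fields taking priority.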
|
"""
Experimental Object Oriented Distributed API - torch.distributed._dist2
=======================================================================
This is an experimental new API for PyTorch Distributed. It is under active development and subject to change or deletion entirely.
This is intended as a proving ground for more flexible and object oriented distributed APIs.
"""
from collections.abc import Generator
from contextlib import contextmanager
from datetime import timedelta
from typing import Protocol, Union
import torch
from torch._C._distributed_c10d import (
_current_process_group,
_set_process_group,
Backend,
ProcessGroup,
Store,
)
from torch.distributed.rendezvous import rendezvous
_BACKENDS: dict[str, "ProcessGroupFactory"] = {}
class ProcessGroupFactory(Protocol):
"""Protocol for process group factories."""
def __call__(
self,
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup: ...
def register_backend(name: str, func: ProcessGroupFactory) -> None:
"""
Register a new process group backend.
Args:
name: The name of the backend.
func: The function to create the process group.
"""
if name in _BACKENDS:
raise ValueError(f"Backend {name} already registered")
_BACKENDS[name] = func
def _gloo_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupGloo
assert pg_options is None, "Gloo backend does not support options"
backend_class = ProcessGroupGloo(store, rank, world_size, timeout)
backend_class._set_sequence_number_for_group()
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.GLOO)
# register devices
pg._register_backend(device, ProcessGroup.BackendType.GLOO, backend_class)
pg._register_backend(
torch.device("cpu"), ProcessGroup.BackendType.GLOO, backend_class
)
if torch.cuda.is_available():
pg._register_backend(
torch.device("cuda"), ProcessGroup.BackendType.GLOO, backend_class
)
return pg
def _nccl_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupNCCL
assert isinstance(pg_options, ProcessGroupNCCL.Options)
pg_options._timeout = timeout
backend_class = ProcessGroupNCCL(store, rank, world_size, pg_options)
backend_class._set_sequence_number_for_group()
backend_class.eager_connect_single_device(device)
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.NCCL)
pg._register_backend(device, ProcessGroup.BackendType.NCCL, backend_class)
return pg
register_backend("gloo", _gloo_factory)
register_backend("nccl", _nccl_factory)
def new_group(
backend: str,
timeout: timedelta,
device: Union[str, torch.device],
pg_options: Backend.Options,
) -> ProcessGroup:
"""
Create a new process group with the given backend and options. This group is
independent and will not be globally registered and thus not usable via the
standard torch.distributed.* APIs.
Args:
backend: The backend to use for the process group.
timeout: The timeout for collective operations.
device: The device to use for the process group.
pg_options: The options to use for the process group.
Returns:
A new process group.
"""
if backend not in _BACKENDS:
raise ValueError(f"Backend {backend} not registered")
device = torch.device(device)
store, rank, world_size = next(iter(rendezvous("env://")))
store.set_timeout(timeout)
return _BACKENDS[backend](store, rank, world_size, timeout, device, pg_options)
def current_process_group() -> ProcessGroup:
"""
Get the current process group. Thread local method.
Returns:
The current process group.
"""
return _current_process_group()
@contextmanager
def process_group(pg: ProcessGroup) -> Generator[None, None, None]:
"""
Context manager for process groups. Thread local method.
Args:
pg: The process group to use.
"""
prev_pg = current_process_group()
_set_process_group(pg)
try:
yield
finally:
_set_process_group(prev_pg)
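# A hedged end-to-end sketch. rendezvous("env://") expects MASTER_ADDR,
# MASTER_PORT, RANK and WORLD_SIZE to be set in the environment; the Gloo
# factory above requires pg_options to be None.
#
#   pg = new_group("gloo", timedelta(seconds=60), "cpu", None)
#   with process_group(pg):
#       # collectives issued here can retrieve `pg` via current_process_group()
#       ...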
|
"""
Experimental Object Oriented Distributed API - torch.distributed._dist2
=======================================================================
This is an experimental new API for PyTorch Distributed. It is under active development and subject to change or deletion entirely.
This is intended as a proving ground for more flexible and object oriented distributed APIs.
"""
from datetime import timedelta
from typing import Protocol, Union
import torch
from torch._C._distributed_c10d import Backend, ProcessGroup, Store
from torch.distributed.rendezvous import rendezvous
_BACKENDS: dict[str, "ProcessGroupFactory"] = {}
class ProcessGroupFactory(Protocol):
"""Protocol for process group factories."""
def __call__(
self,
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup: ...
def register_backend(name: str, func: ProcessGroupFactory) -> None:
"""
Register a new process group backend.
Args:
name: The name of the backend.
func: The function to create the process group.
"""
if name in _BACKENDS:
raise ValueError(f"Backend {name} already registered")
_BACKENDS[name] = func
def _gloo_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupGloo
assert pg_options is None, "Gloo backend does not support options"
backend_class = ProcessGroupGloo(store, rank, world_size, timeout)
backend_class._set_sequence_number_for_group()
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.GLOO)
# register devices
pg._register_backend(device, ProcessGroup.BackendType.GLOO, backend_class)
pg._register_backend(
torch.device("cpu"), ProcessGroup.BackendType.GLOO, backend_class
)
if torch.cuda.is_available():
pg._register_backend(
torch.device("cuda"), ProcessGroup.BackendType.GLOO, backend_class
)
return pg
def _nccl_factory(
store: Store,
rank: int,
world_size: int,
timeout: timedelta,
device: torch.device,
pg_options: Backend.Options,
) -> ProcessGroup:
from torch.distributed import ProcessGroupNCCL
assert isinstance(pg_options, ProcessGroupNCCL.Options)
pg_options._timeout = timeout
backend_class = ProcessGroupNCCL(store, rank, world_size, pg_options)
backend_class._set_sequence_number_for_group()
backend_class.eager_connect_single_device(device)
pg = ProcessGroup(store, rank, world_size)
pg._set_default_backend(ProcessGroup.BackendType.NCCL)
pg._register_backend(device, ProcessGroup.BackendType.NCCL, backend_class)
return pg
register_backend("gloo", _gloo_factory)
register_backend("nccl", _nccl_factory)
def new_group(
backend: str,
timeout: timedelta,
device: Union[str, torch.device],
pg_options: Backend.Options,
) -> ProcessGroup:
"""
Create a new process group with the given backend and options. This group is
independent and will not be globally registered and thus not usable via the
standard torch.distributed.* APIs.
Args:
backend: The backend to use for the process group.
timeout: The timeout for collective operations.
device: The device to use for the process group.
pg_options: The options to use for the process group.
Returns:
A new process group.
"""
if backend not in _BACKENDS:
raise ValueError(f"Backend {backend} not registered")
device = torch.device(device)
store, rank, world_size = next(iter(rendezvous("env://")))
store.set_timeout(timeout)
return _BACKENDS[backend](store, rank, world_size, timeout, device, pg_options)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_batch_norm, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing', 'has_batch_norm'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .setup_env import set_multi_processing
from .version_utils import digit_version, get_git_hash
# TODO: creates intractable circular import issues
# from .time_counter import TimeCounter
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin',
'set_multi_processing'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from mmengine.model import stack_batch
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg: The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, data_samples):
output = self.forward(x)
seg_preds = output['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg for data_sample in data_samples
]
gt_semantic_segs = stack_batch(gt_semantic_segs, pad_value=255)
return self.loss(seg_preds, gt_semantic_segs)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ...core.utils import stack_batch
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg: The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, data_samples):
output = self.forward(x)
seg_preds = output['seg_preds']
gt_semantic_segs = [
data_sample.gt_sem_seg for data_sample in data_samples
]
gt_semantic_segs = stack_batch(gt_semantic_segs, pad_value=255)
return self.loss(seg_preds, gt_semantic_segs)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False, # noqa: FBT001,FBT002
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
Chain (LLMChain) that can be used to answer questions matching the
given schema.
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
return LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel,
verbose: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
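# A hedged usage sketch; the model class is an assumption, any chat model with
# OpenAI function-calling support should work:
#
#   from langchain_openai import ChatOpenAI
#   chain = create_qa_with_sources_chain(ChatOpenAI(model="gpt-4o-mini"))
#   chain.run(context="<retrieved documents>", question="<user question>")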
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False, # noqa: FBT001,FBT002
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
Chain (LLMChain) that can be used to answer questions matching the
given schema.
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
return LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel,
verbose: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
|
"""Data embedding techniques."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._isomap import Isomap
from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding
from ._mds import MDS, smacof
from ._spectral_embedding import SpectralEmbedding, spectral_embedding
from ._t_sne import TSNE, trustworthiness
__all__ = [
"MDS",
"TSNE",
"Isomap",
"LocallyLinearEmbedding",
"SpectralEmbedding",
"locally_linear_embedding",
"smacof",
"spectral_embedding",
"trustworthiness",
]
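# A minimal usage sketch for one of the exports above (synthetic data):
#
#   import numpy as np
#   from sklearn.manifold import TSNE
#   X = np.random.RandomState(0).rand(100, 20)
#   X_2d = TSNE(n_components=2, perplexity=30.0).fit_transform(X)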
|
"""Data embedding techniques."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from ._isomap import Isomap
from ._locally_linear import LocallyLinearEmbedding, locally_linear_embedding
from ._mds import MDS, smacof
from ._spectral_embedding import SpectralEmbedding, spectral_embedding
from ._t_sne import TSNE, trustworthiness
__all__ = [
"locally_linear_embedding",
"LocallyLinearEmbedding",
"Isomap",
"MDS",
"smacof",
"SpectralEmbedding",
"spectral_embedding",
"TSNE",
"trustworthiness",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.dist import all_reduce_params, is_distributed
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = is_distributed()
def after_train_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
all_reduce_params(runner.model.buffers(), op='mean')
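# A hedged config sketch: enabling this hook through the usual MMEngine
# custom_hooks entry (exact placement in a real config may differ):
#
#   custom_hooks = [dict(type='SyncBuffersHook')]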
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine import dist
from mmengine.registry import HOOKS
from .hook import Hook
@HOOKS.register_module()
class SyncBuffersHook(Hook):
"""Synchronize model buffers such as running_mean and running_var in BN at
the end of each epoch."""
priority = 'NORMAL'
def __init__(self) -> None:
self.distributed = dist.is_distributed()
def after_train_epoch(self, runner) -> None:
"""All-reduce model buffers at the end of each epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self.distributed:
dist.all_reduce_params(runner.model.buffers(), op='mean')
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
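# A hedged config sketch (numbers are illustrative): selecting this sampler in
# an R-CNN train_cfg; `context` is supplied by the detector when the sampler
# is built:
#
#   sampler=dict(
#       type='OHEMSampler',
#       num=512,
#       pos_fraction=0.25,
#       neg_pos_ub=-1,
#       add_gt_as_proposals=True)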
|
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
def hard_mining(self, inds, num_expected, bboxes, labels, feats):
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
from tempfile import TemporaryDirectory
from unittest import TestCase, skipIf
from mmengine.logging import MMLogger
from mmengine.registry import (DefaultScope, Registry,
count_registered_modules, init_default_scope,
root, traverse_registry_tree)
from mmengine.utils import is_installed
class TestUtils(TestCase):
def test_traverse_registry_tree(self):
# Hierarchical Registry
#                       DOGS
#              ___________|____________
#              |                      |
#       HOUNDS (hound)       SAMOYEDS (samoyed)
#       _______|_______               |
#       |             |               |
# LITTLE_HOUNDS   MID_HOUNDS   LITTLE_SAMOYEDS
# (little_hound)  (mid_hound)  (little_samoyed)
DOGS = Registry('dogs')
HOUNDS = Registry('dogs', parent=DOGS, scope='hound')
LITTLE_HOUNDS = Registry( # noqa
'dogs', parent=HOUNDS, scope='little_hound')
MID_HOUNDS = Registry('dogs', parent=HOUNDS, scope='mid_hound')
SAMOYEDS = Registry('dogs', parent=DOGS, scope='samoyed')
LITTLE_SAMOYEDS = Registry( # noqa
'dogs', parent=SAMOYEDS, scope='little_samoyed')
@DOGS.register_module()
class GoldenRetriever:
pass
# traversing the tree from the root
result = traverse_registry_tree(DOGS)
self.assertEqual(result[0]['num_modules'], 1)
self.assertEqual(len(result), 6)
# traversing the tree from leaf node
result_leaf = traverse_registry_tree(MID_HOUNDS)
# result from any node should be the same
self.assertEqual(result, result_leaf)
@skipIf(not is_installed('torch'), 'test requires torch')
def test_count_all_registered_modules(self):
temp_dir = TemporaryDirectory()
results = count_registered_modules(temp_dir.name, verbose=True)
self.assertTrue(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
registries_info = results['registries']
for registry in registries_info:
self.assertTrue(hasattr(root, registry))
self.assertEqual(registries_info[registry][0]['num_modules'],
len(getattr(root, registry).module_dict))
temp_dir.cleanup()
# test not saving results
count_registered_modules(save_path=None, verbose=False)
self.assertFalse(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
@skipIf(not is_installed('torch'), 'test requires torch')
def test_init_default_scope(self):
# init default scope
init_default_scope('mmdet')
self.assertEqual(DefaultScope.get_current_instance().scope_name,
'mmdet')
# init default scope when another scope is init
name = f'test-{datetime.datetime.now()}'
DefaultScope.get_instance(name, scope_name='test')
# Warning should be raised since the current
# default scope is not 'mmdet'
with self.assertLogs(MMLogger.get_current_instance(), level='WARNING'):
init_default_scope('mmdet')
|
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import os.path as osp
from tempfile import TemporaryDirectory
from unittest import TestCase, skipIf
from mmengine.registry import (DefaultScope, Registry,
count_registered_modules, init_default_scope,
root, traverse_registry_tree)
from mmengine.utils import is_installed
class TestUtils(TestCase):
def test_traverse_registry_tree(self):
# Hierarchical Registry
#                       DOGS
#              ___________|____________
#              |                      |
#       HOUNDS (hound)       SAMOYEDS (samoyed)
#       _______|_______               |
#       |             |               |
# LITTLE_HOUNDS   MID_HOUNDS   LITTLE_SAMOYEDS
# (little_hound)  (mid_hound)  (little_samoyed)
DOGS = Registry('dogs')
HOUNDS = Registry('dogs', parent=DOGS, scope='hound')
LITTLE_HOUNDS = Registry( # noqa
'dogs', parent=HOUNDS, scope='little_hound')
MID_HOUNDS = Registry('dogs', parent=HOUNDS, scope='mid_hound')
SAMOYEDS = Registry('dogs', parent=DOGS, scope='samoyed')
LITTLE_SAMOYEDS = Registry( # noqa
'dogs', parent=SAMOYEDS, scope='little_samoyed')
@DOGS.register_module()
class GoldenRetriever:
pass
# traversing the tree from the root
result = traverse_registry_tree(DOGS)
self.assertEqual(result[0]['num_modules'], 1)
self.assertEqual(len(result), 6)
# traversing the tree from leaf node
result_leaf = traverse_registry_tree(MID_HOUNDS)
# result from any node should be the same
self.assertEqual(result, result_leaf)
@skipIf(not is_installed('torch'), 'tests requires torch')
def test_count_all_registered_modules(self):
temp_dir = TemporaryDirectory()
results = count_registered_modules(temp_dir.name, verbose=True)
self.assertTrue(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
registries_info = results['registries']
for registry in registries_info:
self.assertTrue(hasattr(root, registry))
self.assertEqual(registries_info[registry][0]['num_modules'],
len(getattr(root, registry).module_dict))
temp_dir.cleanup()
# test not saving results
count_registered_modules(save_path=None, verbose=False)
self.assertFalse(
osp.exists(
osp.join(temp_dir.name, 'modules_statistic_results.json')))
@skipIf(not is_installed('torch'), 'tests requires torch')
def test_init_default_scope(self):
# init default scope
init_default_scope('mmdet')
self.assertEqual(DefaultScope.get_current_instance().scope_name,
'mmdet')
# init default scope when another scope is init
name = f'test-{datetime.datetime.now()}'
DefaultScope.get_instance(name, scope_name='test')
with self.assertWarnsRegex(
Warning, 'The current default scope "test" is not "mmdet"'):
init_default_scope('mmdet')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOF. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOF. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
            It usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
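# Editor's sketch (not part of the original file): a detector registered with
# @MODELS.register_module() is normally built from a config dict through the
# registry rather than instantiated directly. The sub-configs below are
# hypothetical placeholders, not a complete YOLOF configuration, so the build
# call is left commented out.
#
# from mmdet.registry import MODELS
# cfg = dict(
#     type='YOLOF',
#     backbone=dict(type='ResNet', depth=50),
#     neck=dict(type='DilatedEncoder'),
#     bbox_head=dict(type='YOLOFHead', num_classes=80))
# detector = MODELS.build(cfg)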
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils.typing import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_
Args:
backbone (:obj:`ConfigDict` or dict): The backbone module.
neck (:obj:`ConfigDict` or dict): The neck module.
bbox_head (:obj:`ConfigDict` or dict): The bbox head module.
train_cfg (:obj:`ConfigDict` or dict, optional): The training config
of YOLOF. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
of YOLOF. Defaults to None.
data_preprocessor (:obj:`ConfigDict` or dict, optional):
Model preprocessing config for processing the input data.
            It usually includes ``to_rgb``, ``pad_size_divisor``,
``pad_value``, ``mean`` and ``std``. Defaults to None.
init_cfg (:obj:`ConfigDict` or dict, optional): the config to control
the initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
import numpy as np
import torch
from docarray.document import BaseDocument
from docarray.typing import AnyUrl, NdArray, TorchTensor
def test_to_json():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
doc = Mmdoc(
img=np.zeros((3, 224, 224)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(3, 224, 224),
)
doc.json()
def test_from_json():
class Mmdoc(BaseDocument):
img: NdArray
url: AnyUrl
txt: str
torch_tensor: TorchTensor
doc = Mmdoc(
img=np.zeros((2, 2)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(3, 224, 224),
)
new_doc = Mmdoc.parse_raw(doc.json())
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
            # compare the round-tripped document against the original
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
|
import numpy as np
import torch
from docarray.document import BaseDocument
from docarray.typing import AnyUrl, Tensor, TorchTensor
def test_to_json():
class Mmdoc(BaseDocument):
img: Tensor
url: AnyUrl
txt: str
torch_tensor: TorchTensor
doc = Mmdoc(
img=np.zeros((3, 224, 224)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(3, 224, 224),
)
doc.json()
def test_from_json():
class Mmdoc(BaseDocument):
img: Tensor
url: AnyUrl
txt: str
torch_tensor: TorchTensor
doc = Mmdoc(
img=np.zeros((2, 2)),
url='http://doccaray.io',
txt='hello',
torch_tensor=torch.zeros(3, 224, 224),
)
new_doc = Mmdoc.parse_raw(doc.json())
for (field, field2) in zip(doc.dict().keys(), new_doc.dict().keys()):
if field in ['torch_tensor', 'img']:
            # compare the round-tripped document against the original
            assert (getattr(doc, field) == getattr(new_doc, field2)).all()
        else:
            assert getattr(doc, field) == getattr(new_doc, field2)
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
import traceback
from datasets import load_dataset
from sentence_transformers import losses
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, SentenceTransformerTrainingArguments
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, SimilarityFunction
import logging
from datetime import datetime
import sys
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
batch_size = 16
num_train_epochs = 4
# Save path of the model
output_dir = f"output/adaptive_layer_sts_{model_name.replace('/', '-')}-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# If we want, we can limit the maximum sequence length for the model
# model.max_seq_length = 75
logging.info(model)
# 2. Load the STSB dataset: https://huggingface.co/datasets/sentence-transformers/stsb
train_dataset = load_dataset("sentence-transformers/stsb", split="train")
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
logging.info(train_dataset)
# 3. Define our training loss
# CoSENTLoss (https://sbert.net/docs/package_reference/losses.html#cosentloss) needs two text columns and one
# similarity score column (between 0 and 1)
inner_train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, inner_train_loss)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=num_train_epochs,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="adaptive-layer-sts", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
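# Editor's sketch (assumptions flagged, not part of the original script): a model
# trained with AdaptiveLayerLoss can be loaded and served with fewer transformer
# layers at inference time. The attribute path below assumes a BERT-family
# backbone (whose layer list lives at model[0].auto_model.encoder.layer); other
# architectures use different attribute names, so this is illustrative only.
#
# from sentence_transformers import SentenceTransformer
# model = SentenceTransformer(final_output_dir)
# model[0].auto_model.encoder.layer = model[0].auto_model.encoder.layer[:3]
# embeddings = model.encode(["The weather is lovely today."])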
|
"""
This example trains BERT (or any other transformer model like RoBERTa, DistilBERT etc.) for the STSbenchmark from scratch.
It uses AdaptiveLayerLoss with the powerful CoSENTLoss to train models that perform well even when removing some layers.
It generates sentence embeddings that can be compared using cosine-similarity to measure the similarity.
Usage:
python adaptive_layer_sts.py
OR
python adaptive_layer_sts.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "distilbert-base-uncased"
# Read the dataset
train_batch_size = 16
num_epochs = 4
model_save_path = (
"output/adaptive_layer_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Convert the dataset to a DataLoader ready for training
logging.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.CoSENTLoss(model=model)
train_loss = losses.AdaptiveLayerLoss(model, train_loss)
logging.info("Read STSbenchmark dev dataset")
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
test_evaluator(model, output_path=model_save_path)
# Optionally, save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-sts-adaptive-layer")
except Exception:
logging.error(
"Error uploading model to the Hugging Face Hub. To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({model_save_path!r})` "
f"and saving it using `model.push_to_hub('{model_name}-sts-adaptive-layer')`."
)
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.server.v2.library.routes
import backend.server.v2.store.routes
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.contextmanager
def launch_darkly_context():
if settings.config.app_env != backend.util.settings.AppEnvironment.LOCAL:
initialize_launchdarkly()
try:
yield
finally:
shutdown_launchdarkly()
else:
yield
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
with launch_darkly_context():
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
app.include_router(
backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
)
app.include_router(
backend.server.v2.library.routes.router, tags=["v2"], prefix="/api/library"
)
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
execution = await backend.data.graph.get_execution(
user_id=user_id, execution_id=graph_exec_id
)
if not execution:
raise ValueError(f"Execution {graph_exec_id} not found")
return execution.status
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
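# Editor's standalone sketch of the handler-factory pattern used above: one factory
# returns per-status-code handlers, so ValueError and generic Exception can be
# mapped to 400 and 500 through the same code path. The names demo_app and
# make_error_handler are illustrative; the sketch only assumes fastapi is installed.
import fastapi
import fastapi.responses
def make_error_handler(status_code: int = 500):
    def handler(request: fastapi.Request, exc: Exception):
        return fastapi.responses.JSONResponse(
            content={
                "message": f"{request.method} {request.url.path} failed",
                "detail": str(exc),
            },
            status_code=status_code,
        )
    return handler
demo_app = fastapi.FastAPI()
demo_app.add_exception_handler(ValueError, make_error_handler(400))
demo_app.add_exception_handler(Exception, make_error_handler(500))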
|
import contextlib
import logging
import typing
import fastapi
import fastapi.responses
import starlette.middleware.cors
import uvicorn
from autogpt_libs.feature_flag.client import (
initialize_launchdarkly,
shutdown_launchdarkly,
)
import backend.data.block
import backend.data.db
import backend.data.graph
import backend.data.user
import backend.server.routers.v1
import backend.server.v2.store.routes
import backend.util.service
import backend.util.settings
settings = backend.util.settings.Settings()
logger = logging.getLogger(__name__)
logging.getLogger("autogpt_libs").setLevel(logging.INFO)
@contextlib.contextmanager
def launch_darkly_context():
if settings.config.app_env != backend.util.settings.AppEnvironment.LOCAL:
initialize_launchdarkly()
try:
yield
finally:
shutdown_launchdarkly()
else:
yield
@contextlib.asynccontextmanager
async def lifespan_context(app: fastapi.FastAPI):
await backend.data.db.connect()
await backend.data.block.initialize_blocks()
await backend.data.user.migrate_and_encrypt_user_integrations()
await backend.data.graph.fix_llm_provider_credentials()
with launch_darkly_context():
yield
await backend.data.db.disconnect()
docs_url = (
"/docs"
if settings.config.app_env == backend.util.settings.AppEnvironment.LOCAL
else None
)
app = fastapi.FastAPI(
title="AutoGPT Agent Server",
description=(
"This server is used to execute agents that are created by the "
"AutoGPT system."
),
summary="AutoGPT Agent Server",
version="0.1",
lifespan=lifespan_context,
docs_url=docs_url,
)
def handle_internal_http_error(status_code: int = 500, log_error: bool = True):
def handler(request: fastapi.Request, exc: Exception):
if log_error:
logger.exception(f"{request.method} {request.url.path} failed: {exc}")
return fastapi.responses.JSONResponse(
content={
"message": f"{request.method} {request.url.path} failed",
"detail": str(exc),
},
status_code=status_code,
)
return handler
app.add_exception_handler(ValueError, handle_internal_http_error(400))
app.add_exception_handler(Exception, handle_internal_http_error(500))
app.include_router(backend.server.routers.v1.v1_router, tags=["v1"], prefix="/api")
app.include_router(
backend.server.v2.store.routes.router, tags=["v2"], prefix="/api/store"
)
@app.get(path="/health", tags=["health"], dependencies=[])
async def health():
return {"status": "healthy"}
class AgentServer(backend.util.service.AppProcess):
def run(self):
server_app = starlette.middleware.cors.CORSMiddleware(
app=app,
allow_origins=settings.config.backend_cors_allow_origins,
allow_credentials=True,
allow_methods=["*"], # Allows all methods
allow_headers=["*"], # Allows all headers
)
uvicorn.run(
server_app,
host=backend.util.settings.Config().agent_api_host,
port=backend.util.settings.Config().agent_api_port,
)
@staticmethod
async def test_execute_graph(
graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
):
return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
@staticmethod
async def test_create_graph(
create_graph: backend.server.routers.v1.CreateGraph,
user_id: str,
):
return await backend.server.routers.v1.create_new_graph(create_graph, user_id)
@staticmethod
async def test_get_graph_run_status(graph_exec_id: str, user_id: str):
execution = await backend.data.graph.get_execution(
user_id=user_id, execution_id=graph_exec_id
)
if not execution:
raise ValueError(f"Execution {graph_exec_id} not found")
return execution.status
@staticmethod
async def test_get_graph_run_node_execution_results(
graph_id: str, graph_exec_id: str, user_id: str
):
return await backend.server.routers.v1.get_graph_run_node_execution_results(
graph_id, graph_exec_id, user_id
)
@staticmethod
async def test_delete_graph(graph_id: str, user_id: str):
return await backend.server.routers.v1.delete_graph(graph_id, user_id)
def set_test_dependency_overrides(self, overrides: dict):
app.dependency_overrides.update(overrides)
|
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_large.pth')))
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(type='AmpOptimWrapper')
|
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_layers=[3, 8, 27, 3],
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_large.pth')))
fp16 = dict(loss_scale=dict(init_scale=512))
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from .backend import Backend
from .common import AudioMetaData
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = torch.ops.torchaudio.sox_io_get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = torch.ops.torchaudio.sox_io_load_audio_file(
uri, frame_offset, num_frames, normalize, channels_first, format
)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
torch.ops.torchaudio.sox_io_save_audio_file(
uri,
src,
sample_rate,
channels_first,
None,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
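# Editor's standalone sketch of the duck-typing check behind can_decode/can_encode
# above: SoX only handles real paths, so anything exposing .read()/.write() (i.e. a
# file-like object) is rejected up front. Pure stdlib; names are illustrative only.
import io
def _is_path_like_for_decode(uri) -> bool:
    return not hasattr(uri, "read")
assert _is_path_like_for_decode("sample.wav") is True
assert _is_path_like_for_decode(io.BytesIO(b"")) is False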
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from torchaudio.backend.common import AudioMetaData
from .backend import Backend
class SoXBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support reading from file-like objects. ",
"Please use an alternative backend that does support reading from file-like objects, e.g. FFmpeg.",
)
else:
sinfo = torch.ops.torchaudio.sox_io_get_info(uri, format)
if sinfo:
return AudioMetaData(*sinfo)
else:
raise RuntimeError(f"Failed to fetch metadata for {uri}.")
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
if hasattr(uri, "read"):
raise ValueError(
"SoX backend does not support loading from file-like objects. ",
"Please use an alternative backend that does support loading from file-like objects, e.g. FFmpeg.",
)
else:
ret = torch.ops.torchaudio.sox_io_load_audio_file(
uri, frame_offset, num_frames, normalize, channels_first, format
)
if not ret:
raise RuntimeError(f"Failed to load audio from {uri}.")
return ret
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
if hasattr(uri, "write"):
raise ValueError(
"SoX backend does not support writing to file-like objects. ",
"Please use an alternative backend that does support writing to file-like objects, e.g. FFmpeg.",
)
else:
torch.ops.torchaudio.sox_io_save_audio_file(
uri,
src,
sample_rate,
channels_first,
None,
format,
encoding,
bits_per_sample,
)
@staticmethod
def can_decode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "read")
@staticmethod
def can_encode(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str]) -> bool:
# i.e. not a file-like object.
return not hasattr(uri, "write")
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.7.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.7.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
import sys
from jina.serve.runtimes.asyncio import AsyncNewLoopRuntime
from jina.serve.runtimes.head.request_handling import HeaderRequestHandler
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port
with AsyncNewLoopRuntime(args=runtime_args, req_handler_cls=HeaderRequestHandler) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
import sys
from jina.serve.runtimes.head import HeadRuntime
from jina.parsers import set_pod_parser
def run(*args, **kwargs):
runtime_args = set_pod_parser().parse_args(args)
runtime_args.host = runtime_args.host[0]
runtime_args.port = runtime_args.port[0]
with HeadRuntime(runtime_args) as runtime:
runtime.run_forever()
if __name__ == '__main__':
run(*sys.argv[1:])
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Autograph specific overrides for tf.data.ops."""
import functools
import numpy as np
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import tensor_conversion
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import cond
from tensorflow.python.util import nest
# TODO(mdan): These checks should be easier. Fix the nest API.
def _verify_spec_compatible(input_name, spec_name, input_, spec):
"""Verifies that a symbol has a type compatible vith a given spec.
Here, compatibility is viewed in the general TensorFlow sense: that the dtypes
are the same after implicit conversion, if both are tensors.
This verifier ensures consistent treatment of types across AutoGraph.
Args:
input_name: A name to use for `input_` in error messages.
spec_name: A name to use for `spec` in error messages.
input_: Any, value to verify.
spec: TypeSpec that `input_` must be compatible with.
Raises:
ValueError if the two types have been determined not to be compatible.
"""
assert isinstance(spec, tensor_spec.TensorSpec)
  if input_ is None:
# TODO(mdan): raise from None when switching to Py3.
raise ValueError("{} cannot be None".format(input_name))
# TODO(mdan): Use TensorCompatible when ready.
if isinstance(input_, (bool, int, float, str, np.ndarray)):
input_ = tensor_conversion.convert_to_tensor_v2(input_)
input_dtype = getattr(input_, "dtype", None)
if input_dtype != spec.dtype:
input_dtype_str = "no dtype" if input_dtype is None else str(input_dtype)
raise TypeError(
"{} must have the same dtype as {}. Expected {}, got {}".format(
input_name, spec_name, spec.dtype, input_dtype_str
)
)
def _verify_structure_compatible(input_name, spec_name, input_, spec):
"""Verifies that possibly-structured symbol has types compatible vith another.
See _verify_spec_compatible for a more concrete meaning of "compatible".
  Unlike _verify_spec_compatible, which handles singular Tensor-spec objects,
  _verify_structure_compatible can process structures recognized by tf.nest.
Args:
input_name: A name to use for `input_` in error messages.
spec_name: A name to use for `spec` in error messages.
input_: Any, value to verify. May, but doesn't need to, be a structure.
spec: Any, value that `input_` must be compatible with. May, but doesn't
need to, be a structure.
Raises:
ValueError if the two types have been determined not to be compatible.
"""
try:
nest.assert_same_structure(input_, spec, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError(
"{} must have the same element structure as {}.\n\n{}".format(
input_name, spec_name, str(e)
)
) from e
nest.map_structure(
functools.partial(_verify_spec_compatible, input_name, spec_name), input_,
spec)
def _next_tf_iterator(iterator, default=py_builtins.UNSPECIFIED):
if default is py_builtins.UNSPECIFIED:
# Without a default, fall back to the "normal" behavior which raises
# a runtime exception.
return next(iterator)
opt_iterate = iterator.get_next_as_optional()
_verify_structure_compatible(
"the default argument", "the iterate", default, iterator.element_spec
)
return cond.cond(
opt_iterate.has_value(), opt_iterate.get_value, lambda: default
)
def register_overrides():
py_builtins.next_registry.register(
iterator_ops.OwnedIterator, _next_tf_iterator
)
control_flow.for_loop_registry.register(
iterator_ops.OwnedIterator, control_flow._tf_iterator_for_stmt # pylint: disable=protected-access
)
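# Editor's pure-Python analogue (illustrative only) of the contract that
# _next_tf_iterator implements for tf.data iterators: without a default, exhaustion
# raises; with a default, the default is returned instead. The real implementation
# cannot rely on try/except in graph mode, so it uses get_next_as_optional() plus
# cond.cond rather than catching StopIteration.
_UNSPECIFIED = object()
def _next_or_default(iterator, default=_UNSPECIFIED):
    if default is _UNSPECIFIED:
        return next(iterator)  # no default: StopIteration propagates
    try:
        return next(iterator)
    except StopIteration:
        return default  # default given: return it instead of raising
_it = iter([1])
assert _next_or_default(_it) == 1
assert _next_or_default(_it, default=-1) == -1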
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Autograph specifc overrides for tf.data.ops."""
import functools
import numpy as np
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import tensor_conversion
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import cond
from tensorflow.python.util import nest
# TODO(mdan): These checks should be easier. Fix the nest API.
def _verify_spec_compatible(input_name, spec_name, input_, spec):
"""Verifies that a symbol has a type compatible vith a given spec.
Here, compatibility is viewed in the general TensorFlow sense: that the dtypes
are the same after implicit conversion, if both are tensors.
This verifier ensures consistent treatment of types across AutoGraph.
Args:
input_name: A name to use for `input_` in error messages.
spec_name: A name to use for `spec` in error messages.
input_: Any, value to verify.
spec: TypeSpec that `input_` must be compatible with.
Raises:
ValueError if the two types have been determined not to be compatible.
"""
assert isinstance(spec, tensor_spec.TensorSpec)
  if input_ is None:
# TODO(mdan): raise from None when switching to Py3.
raise ValueError("{} cannot be None".format(input_name))
# TODO(mdan): Use TensorCompatible when ready.
if isinstance(input_, (bool, int, float, str, np.ndarray)):
input_ = tensor_conversion.convert_to_tensor_v2(input_)
input_dtype = getattr(input_, "dtype", None)
if input_dtype != spec.dtype:
input_dtype_str = "no dtype" if input_dtype is None else str(input_dtype)
raise TypeError(
"{} must have the same dtype as {}. Expected {}, got {}".format(
input_name, spec_name, spec.dtype, input_dtype_str
)
)
def _verify_structure_compatible(input_name, spec_name, input_, spec):
"""Verifies that possibly-structured symbol has types compatible vith another.
See _verify_spec_compatible for a more concrete meaning of "compatible".
  Unlike _verify_spec_compatible, which handles singular Tensor-spec objects,
  _verify_structure_compatible can process structures recognized by tf.nest.
Args:
input_name: A name to use for `input_` in error messages.
spec_name: A name to use for `spec` in error messages.
input_: Any, value to verify. May, but doesn't need to, be a structure.
spec: Any, value that `input_` must be compatible with. May, but doesn't
need to, be a structure.
Raises:
ValueError if the two types have been determined not to be compatible.
"""
try:
nest.assert_same_structure(input_, spec, expand_composites=True)
except (ValueError, TypeError) as e:
raise TypeError(
"{} must have the same element structure as {}.\n\n{}".format(
input_name, spec_name, str(e)
)
) from e
nest.map_structure(
functools.partial(_verify_spec_compatible, input_name, spec_name), input_,
spec)
def _next_tf_iterator(iterator, default=py_builtins.UNSPECIFIED):
if default is py_builtins.UNSPECIFIED:
# Without a default, fall back to the "normal" behavior which raises
# a runtime exception.
return next(iterator)
opt_iterate = iterator.get_next_as_optional()
_verify_structure_compatible(
"the default argument", "the iterate", default, iterator.element_spec
)
return cond.cond(
opt_iterate.has_value(), opt_iterate.get_value, lambda: default
)
def register_overrides():
py_builtins.next_registry.register(
iterator_ops.OwnedIterator, _next_tf_iterator
)
control_flow.for_loop_registry.register(
iterator_ops.OwnedIterator, control_flow._tf_iterator_for_stmt # pylint: disable=protected-access
)
|
from typing import Dict, Set, Type
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
__all__ = [
'NdArray',
'Tensor',
'Embedding',
'NdArrayEmbedding',
'framework_types',
'type_to_framework',
]
framework_types: Dict[str, Set] = {'numpy': {NdArray, NdArrayEmbedding}, 'torch': set()}
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
framework_types['torch'] = {TorchTensor, TorchEmbedding}
type_to_framework: Dict[Type, str] = {
type_: framework
for framework, type_set in framework_types.items()
for type_ in type_set
}
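# Editor's standalone sketch (stub class names, not docarray types) of the mapping
# inversion above: each framework owns a set of tensor types, and the inverted dict
# answers "which framework does this type belong to?" with a single lookup.
from typing import Dict, Set, Type
class _NdArrayStub:
    pass
class _TorchTensorStub:
    pass
_framework_types: Dict[str, Set[type]] = {
    'numpy': {_NdArrayStub},
    'torch': {_TorchTensorStub},
}
_type_to_framework: Dict[Type, str] = {
    type_: framework
    for framework, type_set in _framework_types.items()
    for type_ in type_set
}
assert _type_to_framework[_NdArrayStub] == 'numpy'
assert _type_to_framework[_TorchTensorStub] == 'torch'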
|
from docarray.typing.tensor.embedding import Embedding, NdArrayEmbedding
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import Tensor
__all__ = [
'NdArray',
'Tensor',
'Embedding',
'NdArrayEmbedding',
]
try:
import torch # noqa: F401
except ImportError:
pass
else:
from docarray.typing.tensor.embedding import TorchEmbedding # noqa: F401
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
__all__.extend(['TorchEmbedding', 'TorchTensor'])
|
"""MutliOn Client API tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.multion.close_session import MultionCloseSession
from langchain_community.tools.multion.create_session import MultionCreateSession
from langchain_community.tools.multion.update_session import MultionUpdateSession
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionCreateSession": "langchain_community.tools.multion.create_session",
"MultionUpdateSession": "langchain_community.tools.multion.update_session",
"MultionCloseSession": "langchain_community.tools.multion.close_session",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionCloseSession",
"MultionCreateSession",
"MultionUpdateSession",
]
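# Editor's sketch (hypothetical alias, not langchain code) of the PEP 562
# module-level __getattr__ pattern used above; shown commented out because it
# belongs in its own module file. Names missing from that module's namespace are
# routed through one function that warns about the deprecated location and lazily
# imports the object from its new home.
#
# import importlib
# import warnings
# _DEPRECATED_LOOKUP = {"OldJSONDecoder": ("json", "JSONDecoder")}  # hypothetical alias
# def __getattr__(name: str):
#     if name in _DEPRECATED_LOOKUP:
#         module_path, attr = _DEPRECATED_LOOKUP[name]
#         warnings.warn(f"Import {attr} from {module_path} instead.", DeprecationWarning)
#         return getattr(importlib.import_module(module_path), attr)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")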
|
"""MutliOn Client API tools."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools.multion.close_session import MultionCloseSession
from langchain_community.tools.multion.create_session import MultionCreateSession
from langchain_community.tools.multion.update_session import MultionUpdateSession
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionCreateSession": "langchain_community.tools.multion.create_session",
"MultionUpdateSession": "langchain_community.tools.multion.update_session",
"MultionCloseSession": "langchain_community.tools.multion.close_session",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionCreateSession",
"MultionUpdateSession",
"MultionCloseSession",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.utils import MultiConfig
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
the input feature map instead of upsample it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
x (Tensor): Extract mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
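# Editor's standalone shape-flow sketch (illustrative sizes, assumes torch is
# installed; not the real head config): the coarse head downsamples the RoI
# feature map, flattens it, runs fc layers, and reshapes the logits into one
# num_classes x H x W mask per RoI, mirroring forward() above.
import torch
num_rois, channels, roi_size, num_classes, factor = 4, 256, 14, 80, 2
out_size = roi_size // factor
x = torch.randn(num_rois, channels, roi_size, roi_size)
downsample = torch.nn.Conv2d(channels, channels, kernel_size=factor, stride=factor)
x = downsample(x).flatten(1)  # (4, 256 * 7 * 7)
fc = torch.nn.Linear(x.size(1), 1024)
fc_logits = torch.nn.Linear(1024, num_classes * out_size * out_size)
mask_pred = fc_logits(torch.relu(fc(x))).view(num_rois, num_classes, out_size, out_size)
assert mask_pred.shape == (4, 80, 7, 7)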
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmengine.model import ModuleList
from torch import Tensor
from mmdet.core.utils import MultiConfig
from mmdet.registry import MODELS
from .fcn_mask_head import FCNMaskHead
@MODELS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
the input feature map instead of upsample it.
Args:
num_convs (int): Number of conv layers in the head. Defaults to 0.
num_fcs (int): Number of fc layers in the head. Defaults to 2.
fc_out_channels (int): Number of output channels of fc layer.
Defaults to 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Defaults to 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs: int = 0,
num_fcs: int = 2,
fc_out_channels: int = 1024,
downsample_factor: int = 2,
init_cfg: MultiConfig = dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg) -> None:
super().__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self) -> None:
"""Initialize weights."""
super(FCNMaskHead, self).init_weights()
def forward(self, x: Tensor) -> Tensor:
"""Forward features from the upstream network.
Args:
x (Tensor): Extract mask RoI features.
Returns:
Tensor: Predicted foreground masks.
"""
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import subprocess
import librosa
import pytest
from jina import Document, DocumentArray, Flow
from ...vggish import vggish_input
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sample_rate = librosa.load(
os.path.join(cur_dir, '../test_data/sample.wav')
)
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
['jina', 'executor', f'--uses=docker://{build_docker_image}'],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'pea',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:"/GPU:0"',
],
timeout=30,
check=True,
)
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import librosa
from jina import Flow, Document, DocumentArray
from ...vggish import vggish_input
from ...vggish_audio_encoder import VggishAudioEncoder
cur_dir = os.path.dirname(os.path.abspath(__file__))
def test_flow_from_yml():
doc = DocumentArray([Document()])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
resp = f.post(on='test', inputs=doc, return_results=True)
assert resp is not None
def test_embedding_exists():
x_audio, sample_rate = librosa.load(os.path.join(cur_dir, '../test_data/sample.wav'))
log_mel_examples = vggish_input.waveform_to_examples(x_audio, sample_rate)
doc = DocumentArray([Document(blob=log_mel_examples)])
with Flow.load_config(os.path.join(cur_dir, 'flow.yml')) as f:
responses = f.post(on='index', inputs=doc, return_results=True)
assert responses[0].docs[0].embedding is not None
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import SKLearnVectorStore
from langchain_community.vectorstores.sklearn import (
BaseSerializer,
BsonSerializer,
JsonSerializer,
ParquetSerializer,
SKLearnVectorStoreException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSerializer": "langchain_community.vectorstores.sklearn",
"JsonSerializer": "langchain_community.vectorstores.sklearn",
"BsonSerializer": "langchain_community.vectorstores.sklearn",
"ParquetSerializer": "langchain_community.vectorstores.sklearn",
"SKLearnVectorStoreException": "langchain_community.vectorstores.sklearn",
"SKLearnVectorStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSerializer",
"BsonSerializer",
"JsonSerializer",
"ParquetSerializer",
"SKLearnVectorStore",
"SKLearnVectorStoreException",
]
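# Illustrative sketch (assumption, not part of the shim): names listed in
# DEPRECATED_LOOKUP are resolved lazily through __getattr__ above, which
# forwards the import to langchain_community and may emit a deprecation
# warning pointing users at the new location.
def _example_deprecated_lookup():
    # Hypothetical helper for illustration only.
    store_cls = __getattr__("SKLearnVectorStore")
    return store_cls.__module__  # expected to live under langchain_community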
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import SKLearnVectorStore
from langchain_community.vectorstores.sklearn import (
BaseSerializer,
BsonSerializer,
JsonSerializer,
ParquetSerializer,
SKLearnVectorStoreException,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BaseSerializer": "langchain_community.vectorstores.sklearn",
"JsonSerializer": "langchain_community.vectorstores.sklearn",
"BsonSerializer": "langchain_community.vectorstores.sklearn",
"ParquetSerializer": "langchain_community.vectorstores.sklearn",
"SKLearnVectorStoreException": "langchain_community.vectorstores.sklearn",
"SKLearnVectorStore": "langchain_community.vectorstores",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BaseSerializer",
"JsonSerializer",
"BsonSerializer",
"ParquetSerializer",
"SKLearnVectorStoreException",
"SKLearnVectorStore",
]
|
from langchain_core.embeddings import Embeddings
from langchain_core.utils import secret_from_env
from openai import OpenAI
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
# type: ignore
class FireworksEmbeddings(BaseModel, Embeddings):
"""Fireworks embedding model integration.
Setup:
Install ``langchain_fireworks`` and set environment variable
``FIREWORKS_API_KEY``.
.. code-block:: bash
pip install -U langchain_fireworks
export FIREWORKS_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Fireworks model to use.
Key init args — client params:
fireworks_api_key: SecretStr
Fireworks API key.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_fireworks import FireworksEmbeddings
model = FireworksEmbeddings(
model='nomic-ai/nomic-embed-text-v1.5'
# Use FIREWORKS_API_KEY env var or pass it in directly
# fireworks_api_key="..."
)
Embed multiple texts:
.. code-block:: python
            vectors = model.embed_documents(['hello', 'goodbye'])
# Showing only the first 3 coordinates
print(len(vectors))
print(vectors[0][:3])
.. code-block:: python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
            vector = model.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
"""
client: OpenAI = Field(default=None, exclude=True) # type: ignore[assignment] # :meta private:
fireworks_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env(
"FIREWORKS_API_KEY",
default="",
),
)
"""Fireworks API key.
Automatically read from env variable ``FIREWORKS_API_KEY`` if not provided.
"""
model: str = "nomic-ai/nomic-embed-text-v1.5"
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate environment variables."""
self.client = OpenAI(
api_key=self.fireworks_api_key.get_secret_value(),
base_url="https://api.fireworks.ai/inference/v1",
)
return self
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs."""
return [
i.embedding
for i in self.client.embeddings.create(input=texts, model=self.model).data
]
def embed_query(self, text: str) -> list[float]:
"""Embed query text."""
return self.embed_documents([text])[0]
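# Design note (illustrative comment, not part of the original module):
# FireworksEmbeddings reuses the OpenAI Python client pointed at Fireworks'
# OpenAI-compatible endpoint (see base_url in validate_environment), so no
# bespoke HTTP layer is needed. Minimal usage sketch, assuming FIREWORKS_API_KEY
# is set in the environment:
#   emb = FireworksEmbeddings(model="nomic-ai/nomic-embed-text-v1.5")
#   emb.embed_query("hello")          # -> list[float]
#   emb.embed_documents(["a", "b"])   # -> list[list[float]]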
|
from langchain_core.embeddings import Embeddings
from langchain_core.utils import secret_from_env
from openai import OpenAI
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import Self
# type: ignore
class FireworksEmbeddings(BaseModel, Embeddings):
"""Fireworks embedding model integration.
Setup:
Install ``langchain_fireworks`` and set environment variable
``FIREWORKS_API_KEY``.
.. code-block:: bash
pip install -U langchain_fireworks
export FIREWORKS_API_KEY="your-api-key"
Key init args — completion params:
model: str
Name of Fireworks model to use.
Key init args — client params:
fireworks_api_key: SecretStr
Fireworks API key.
See full list of supported init args and their descriptions in the params section.
Instantiate:
.. code-block:: python
from langchain_fireworks import FireworksEmbeddings
model = FireworksEmbeddings(
model='nomic-ai/nomic-embed-text-v1.5'
# Use FIREWORKS_API_KEY env var or pass it in directly
# fireworks_api_key="..."
)
Embed multiple texts:
.. code-block:: python
            vectors = model.embed_documents(['hello', 'goodbye'])
# Showing only the first 3 coordinates
print(len(vectors))
print(vectors[0][:3])
.. code-block:: python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
Embed single text:
.. code-block:: python
input_text = "The meaning of life is 42"
            vector = model.embed_query(input_text)
print(vector[:3])
.. code-block:: python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
"""
client: OpenAI = Field(default=None, exclude=True) # type: ignore[assignment] # :meta private:
fireworks_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env(
"FIREWORKS_API_KEY",
default="",
),
)
"""Fireworks API key.
Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
"""
model: str = "nomic-ai/nomic-embed-text-v1.5"
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate environment variables."""
self.client = OpenAI(
api_key=self.fireworks_api_key.get_secret_value(),
base_url="https://api.fireworks.ai/inference/v1",
)
return self
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed search docs."""
return [
i.embedding
for i in self.client.embeddings.create(input=texts, model=self.model).data
]
def embed_query(self, text: str) -> list[float]:
"""Embed query text."""
return self.embed_documents([text])[0]
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocList
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocArrays in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocList names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocList object at the specified name
:param name: The name of the DocList to delete
:param missing_ok: If true, no error will be raised if the DocList does not exist.
:return: True if the DocList was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
docs: 'DocList',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocList to the specified name.
:param docs: The DocList to push
:param name: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocList should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocList
"""
...
@staticmethod
@abstractmethod
def pull(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocList':
"""Pull a DocList from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: A DocList
"""
...
@staticmethod
@abstractmethod
def pull_stream(
docs_cls: Type['DocList'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param docs_cls: The DocList class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocList will be cached locally
:return: An iterator of documents"""
...
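# Minimal sketch of a concrete store (assumption, for illustration only): an
# in-memory backend satisfying the abstract interface above. A real backend
# would persist documents remotely instead of keeping them in a dict.
class _InMemoryDocStore(AbstractDocStore):
    _store: Dict[str, list] = {}
    @staticmethod
    def list(namespace: str, show_table: bool) -> List[str]:
        return [name for name in _InMemoryDocStore._store if name.startswith(namespace)]
    @staticmethod
    def delete(name: str, missing_ok: bool) -> bool:
        if name in _InMemoryDocStore._store:
            del _InMemoryDocStore._store[name]
            return True
        if not missing_ok:
            raise KeyError(name)
        return False
    @staticmethod
    def push(docs, name, public, show_progress, branding) -> Dict:
        _InMemoryDocStore._store[name] = list(docs)
        return {'name': name, 'count': len(_InMemoryDocStore._store[name])}
    @staticmethod
    def push_stream(docs, url, public=True, show_progress=False, branding=None) -> Dict:
        return _InMemoryDocStore.push(list(docs), url, public, show_progress, branding)
    @staticmethod
    def pull(docs_cls, name, show_progress, local_cache):
        return docs_cls(_InMemoryDocStore._store.get(name, []))
    @staticmethod
    def pull_stream(docs_cls, name, show_progress, local_cache):
        yield from _InMemoryDocStore._store.get(name, [])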
|
from abc import ABC, abstractmethod
from typing import Dict, Iterator, List, Optional, Type
from typing_extensions import TYPE_CHECKING
if TYPE_CHECKING:
from docarray import BaseDoc, DocArray
class AbstractDocStore(ABC):
@staticmethod
@abstractmethod
def list(namespace: str, show_table: bool) -> List[str]:
"""List all DocArrays in the specified backend at the namespace.
:param namespace: The namespace to list
:param show_table: If true, a table is printed to the console
:return: A list of DocArray names
"""
...
@staticmethod
@abstractmethod
def delete(name: str, missing_ok: bool) -> bool:
"""Delete the DocArray object at the specified name
:param name: The name of the DocArray to delete
:param missing_ok: If true, no error will be raised if the DocArray does not exist.
:return: True if the DocArray was deleted, False if it did not exist.
"""
...
@staticmethod
@abstractmethod
def push(
da: 'DocArray',
name: str,
public: bool,
show_progress: bool,
branding: Optional[Dict],
) -> Dict:
"""Push this DocArray to the specified name.
:param da: The DocArray to push
:param name: The name to push to
:param public: Whether the DocArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocArray
"""
...
@staticmethod
@abstractmethod
def push_stream(
docs: Iterator['BaseDoc'],
url: str,
public: bool = True,
show_progress: bool = False,
branding: Optional[Dict] = None,
) -> Dict:
"""Push a stream of documents to the specified name.
:param docs: a stream of documents
:param url: The name to push to
:param public: Whether the DocArray should be publicly accessible
:param show_progress: If true, a progress bar will be displayed.
:param branding: Branding information to be stored with the DocArray
"""
...
@staticmethod
@abstractmethod
def pull(
da_cls: Type['DocArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> 'DocArray':
"""Pull a DocArray from the specified name.
:param da_cls: The DocArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocArray will be cached locally
:return: A DocArray
"""
...
@staticmethod
@abstractmethod
def pull_stream(
da_cls: Type['DocArray'],
name: str,
show_progress: bool,
local_cache: bool,
) -> Iterator['BaseDoc']:
"""Pull a stream of documents from the specified name.
:param da_cls: The DocArray class to instantiate
:param name: The name to pull from
:param show_progress: If true, a progress bar will be displayed.
:param local_cache: If true, the DocArray will be cached locally
:return: An iterator of documents"""
...
|
"""Macrometa GDN Reader."""
import json
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MacrometaGDNReader(BaseReader):
"""
Macrometa GDN Reader.
Reads vectors from Macrometa GDN
"""
def __init__(self, url: str, apikey: str):
self.url = url
self.apikey = apikey
def load_data(self, collection_list: List[str]) -> List[Document]:
"""
        Loads data from the given Macrometa GDN collections.
        Args:
            collection_list: Names of the collections to read from
"""
if collection_list is None:
raise ValueError("Must specify collection name(s)")
results = []
for collection_name in collection_list:
collection = self._load_collection(collection_name)
results.append(
Document(
text=collection, extra_info={"collection_name": collection_name}
)
)
return results
def _load_collection(self, collection_name: str) -> str:
        """Loads a collection from the database.
        Args:
            collection_name: Name of the collection to read from
        """
        all_documents = []
url = self.url + "/_fabric/_system/_api/cursor"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": "apikey " + self.apikey,
}
data = {
"batchSize": 1000,
"ttl": 60,
"query": "FOR doc IN " + collection_name + " RETURN doc",
}
response = requests.post(url, headers=headers, data=json.dumps(data))
response_json = response.json()
if response.status_code == 201:
all_documents.extend(response_json.get("result", []))
while response_json.get("hasMore"):
cursor_id = response_json.get("id")
next_url = self.url + "/_fabric/_system/_api/cursor/" + cursor_id
response = requests.put(next_url, headers=headers)
if response.status_code == 200:
response_json = response.json()
all_documents.extend(response_json.get("result", []))
else:
print(f"Request failed with status code {response.status_code}")
break
else:
print(f"Initial request failed with status code {response.status_code}")
return str(all_documents)
if __name__ == "__main__":
reader = MacrometaGDNReader("https://api-anurag.eng.macrometa.io", "test")
print(reader.load_data(collection_list=["test"]))
|
"""Macrometa GDN Reader."""
import json
from typing import List
import requests
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class MacrometaGDNReader(BaseReader):
"""Macrometa GDN Reader.
Reads vectors from Macrometa GDN
"""
def __init__(self, url: str, apikey: str):
self.url = url
self.apikey = apikey
def load_data(self, collection_list: List[str]) -> List[Document]:
"""Loads data from the input directory.
Args:
api: Macrometa GDN API key
collection_name: Name of the collection to read from
"""
if collection_list is None:
raise ValueError("Must specify collection name(s)")
results = []
for collection_name in collection_list:
collection = self._load_collection(collection_name)
results.append(
Document(
text=collection, extra_info={"collection_name": collection_name}
)
)
return results
def _load_collection(self, collection_name: str) -> str:
        """Loads a collection from the database.
        Args:
            collection_name: Name of the collection to read from
        """
        all_documents = []
url = self.url + "/_fabric/_system/_api/cursor"
headers = {
"accept": "application/json",
"content-type": "application/json",
"Authorization": "apikey " + self.apikey,
}
data = {
"batchSize": 1000,
"ttl": 60,
"query": "FOR doc IN " + collection_name + " RETURN doc",
}
response = requests.post(url, headers=headers, data=json.dumps(data))
response_json = response.json()
if response.status_code == 201:
all_documents.extend(response_json.get("result", []))
while response_json.get("hasMore"):
cursor_id = response_json.get("id")
next_url = self.url + "/_fabric/_system/_api/cursor/" + cursor_id
response = requests.put(next_url, headers=headers)
if response.status_code == 200:
response_json = response.json()
all_documents.extend(response_json.get("result", []))
else:
print(f"Request failed with status code {response.status_code}")
break
else:
print(f"Initial request failed with status code {response.status_code}")
return str(all_documents)
if __name__ == "__main__":
reader = MacrometaGDNReader("https://api-anurag.eng.macrometa.io", "test")
print(reader.load_data(collection_list=["test"]))
|
"""CIFAR10 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar10.load_data")
def load_data():
"""Loads the CIFAR10 dataset.
This is a dataset of 50,000 32x32 color training images and 10,000 test
images, labeled over 10 categories. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
The classes are:
| Label | Description |
|:-----:|-------------|
| 0 | airplane |
| 1 | automobile |
| 2 | bird |
| 3 | cat |
| 4 | deer |
| 5 | dog |
| 6 | frog |
| 7 | horse |
| 8 | ship |
| 9 | truck |
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shapes
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shapes
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
dirname = "cifar-10-batches-py-target"
origin = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
extract=True,
file_hash=( # noqa: E501
"6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
),
)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype="uint8")
y_train = np.empty((num_train_samples,), dtype="uint8")
# batches are within an inner folder
path = os.path.join(path, "cifar-10-batches-py")
for i in range(1, 6):
fpath = os.path.join(path, "data_batch_" + str(i))
(
x_train[(i - 1) * 10000 : i * 10000, :, :, :],
y_train[(i - 1) * 10000 : i * 10000],
) = load_batch(fpath)
fpath = os.path.join(path, "test_batch")
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
x_test = x_test.astype(x_train.dtype)
y_test = y_test.astype(y_train.dtype)
return (x_train, y_train), (x_test, y_test)
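# Illustrative follow-up (not part of the loader): typical preprocessing after
# load_data() scales pixels to [0, 1] and one-hot encodes the integer labels.
# Wrapped in a helper so importing this module stays side-effect free.
def _example_preprocess():
    (x_train, y_train), (x_test, y_test) = load_data()
    x_train = x_train.astype("float32") / 255.0
    x_test = x_test.astype("float32") / 255.0
    num_classes = 10
    y_train = np.eye(num_classes, dtype="float32")[y_train.flatten()]
    y_test = np.eye(num_classes, dtype="float32")[y_test.flatten()]
    return (x_train, y_train), (x_test, y_test)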
|
"""CIFAR10 small images classification dataset."""
import os
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.datasets.cifar import load_batch
from keras.src.utils.file_utils import get_file
@keras_export("keras.datasets.cifar10.load_data")
def load_data():
"""Loads the CIFAR10 dataset.
This is a dataset of 50,000 32x32 color training images and 10,000 test
images, labeled over 10 categories. See more info at the
[CIFAR homepage](https://www.cs.toronto.edu/~kriz/cifar.html).
The classes are:
| Label | Description |
|:-----:|-------------|
| 0 | airplane |
| 1 | automobile |
| 2 | bird |
| 3 | cat |
| 4 | deer |
| 5 | dog |
| 6 | frog |
| 7 | horse |
| 8 | ship |
| 9 | truck |
Returns:
Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.
    **`x_train`**: `uint8` NumPy array of RGB image data with shapes
`(50000, 32, 32, 3)`, containing the training data. Pixel values range
from 0 to 255.
**`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(50000, 1)` for the training data.
    **`x_test`**: `uint8` NumPy array of RGB image data with shapes
`(10000, 32, 32, 3)`, containing the test data. Pixel values range
from 0 to 255.
**`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)
with shape `(10000, 1)` for the test data.
Example:
```python
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
assert x_train.shape == (50000, 32, 32, 3)
assert x_test.shape == (10000, 32, 32, 3)
assert y_train.shape == (50000, 1)
assert y_test.shape == (10000, 1)
```
"""
dirname = "cifar-10-batches-py"
origin = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
path = get_file(
fname=dirname,
origin=origin,
untar=True,
file_hash=( # noqa: E501
"6d958be074577803d12ecdefd02955f39262c83c16fe9348329d7fe0b5c001ce"
),
)
num_train_samples = 50000
x_train = np.empty((num_train_samples, 3, 32, 32), dtype="uint8")
y_train = np.empty((num_train_samples,), dtype="uint8")
for i in range(1, 6):
fpath = os.path.join(path, "data_batch_" + str(i))
(
x_train[(i - 1) * 10000 : i * 10000, :, :, :],
y_train[(i - 1) * 10000 : i * 10000],
) = load_batch(fpath)
fpath = os.path.join(path, "test_batch")
x_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
if backend.image_data_format() == "channels_last":
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
x_test = x_test.astype(x_train.dtype)
y_test = y_test.astype(y_train.dtype)
return (x_train, y_train), (x_test, y_test)
|
import csv
import logging
import os
from typing import List
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
    it computes the Pearson & Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: List[List[str]], scores: List[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
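# Usage sketch (illustration only; the sentence pairs and gold scores below are
# made up): build the evaluator from InputExample objects and run it against a
# trained CrossEncoder; the return value is the Spearman correlation.
def _example_evaluation(cross_encoder_model):
    examples = [
        InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=0.9),
        InputExample(texts=["A man is eating food.", "The girl is playing guitar."], label=0.1),
    ]
    evaluator = CECorrelationEvaluator.from_input_examples(examples, name="sts-dev")
    return evaluator(cross_encoder_model, output_path=None)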
|
import logging
from scipy.stats import pearsonr, spearmanr
from typing import List
import os
import csv
from ... import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
    it computes the Pearson & Spearman correlation between the predicted score for the sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: List[List[str]], scores: List[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
|
from typing import Any, Sequence
from llama_index.core.base.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.core.llms.callbacks import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.core.llms.llm import LLM
class CustomLLM(LLM):
"""
Simple abstract base class for custom LLMs.
Subclasses must implement the `__init__`, `_complete`,
`_stream_complete`, and `metadata` methods.
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, formatted=formatted, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@classmethod
def class_name(cls) -> str:
return "custom_llm"
|
from typing import Any, Sequence
from llama_index.core.base.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.core.llms.callbacks import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.core.llms.llm import LLM
class CustomLLM(LLM):
"""Simple abstract base class for custom LLMs.
Subclasses must implement the `__init__`, `_complete`,
`_stream_complete`, and `metadata` methods.
"""
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
assert self.messages_to_prompt is not None
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, formatted=formatted, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@classmethod
def class_name(cls) -> str:
return "custom_llm"
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(
os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"), weights_only=True
)
)
return model
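# Usage sketch (illustration only; the dimensions are made up): Dense operates
# on the "sentence_embedding" entry of the feature dict, so a standalone call
# looks like this.
def _example_dense_forward():
    layer = Dense(in_features=768, out_features=256)
    features = {"sentence_embedding": torch.randn(2, 768)}
    out = layer(features)
    return out["sentence_embedding"].shape  # torch.Size([2, 256])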
|
from __future__ import annotations
import json
import os
import torch
from safetensors.torch import load_model as load_safetensors_model
from safetensors.torch import save_model as save_safetensors_model
from torch import Tensor, nn
from sentence_transformers.util import fullname, import_from_string
class Dense(nn.Module):
"""
Feed-forward function with activation function.
This layer takes a fixed-sized sentence embedding and passes it through a feed-forward layer. Can be used to generate deep averaging networks (DAN).
Args:
in_features: Size of the input dimension
out_features: Output size
bias: Add a bias vector
activation_function: Pytorch activation function applied on
output
init_weight: Initial value for the matrix of the linear layer
init_bias: Initial value for the bias of the linear layer
"""
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
activation_function=nn.Tanh(),
init_weight: Tensor = None,
init_bias: Tensor = None,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bias = bias
self.activation_function = activation_function
self.linear = nn.Linear(in_features, out_features, bias=bias)
if init_weight is not None:
self.linear.weight = nn.Parameter(init_weight)
if init_bias is not None:
self.linear.bias = nn.Parameter(init_bias)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.activation_function(self.linear(features["sentence_embedding"]))})
return features
def get_sentence_embedding_dimension(self) -> int:
return self.out_features
def get_config_dict(self):
return {
"in_features": self.in_features,
"out_features": self.out_features,
"bias": self.bias,
"activation_function": fullname(self.activation_function),
}
def save(self, output_path, safe_serialization: bool = True) -> None:
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut)
if safe_serialization:
save_safetensors_model(self, os.path.join(output_path, "model.safetensors"))
else:
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def __repr__(self):
return f"Dense({self.get_config_dict()})"
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
config["activation_function"] = import_from_string(config["activation_function"])()
model = Dense(**config)
if os.path.exists(os.path.join(input_path, "model.safetensors")):
load_safetensors_model(model, os.path.join(input_path, "model.safetensors"))
else:
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import DiffusionPipeline
from diffusers.utils.testing_utils import backend_empty_cache, require_torch_gpu, slow, torch_device
@require_torch_gpu
@slow
class QuantCompileTests(unittest.TestCase):
@property
def quantization_config(self):
raise NotImplementedError(
"This property should be implemented in the subclass to return the appropriate quantization config."
)
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def _init_pipeline(self, quantization_config, torch_dtype):
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers",
quantization_config=quantization_config,
torch_dtype=torch_dtype,
)
return pipe
def _test_torch_compile(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype).to("cuda")
        # compile with fullgraph=True to ensure the whole graph is captured
pipe.transformer.compile(fullgraph=True)
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_cpu_offload(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype)
pipe.enable_model_cpu_offload()
pipe.transformer.compile()
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_group_offload_leaf(
self, quantization_config, torch_dtype=torch.bfloat16, *, use_stream: bool = False
):
torch._dynamo.config.cache_size_limit = 10000
pipe = self._init_pipeline(quantization_config, torch_dtype)
group_offload_kwargs = {
"onload_device": torch.device("cuda"),
"offload_device": torch.device("cpu"),
"offload_type": "leaf_level",
"use_stream": use_stream,
}
pipe.transformer.enable_group_offload(**group_offload_kwargs)
pipe.transformer.compile()
for name, component in pipe.components.items():
if name != "transformer" and isinstance(component, torch.nn.Module):
if torch.device(component.device).type == "cpu":
component.to("cuda")
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
|
# coding=utf-8
# Copyright 2025 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import torch
from diffusers import DiffusionPipeline
from diffusers.utils.testing_utils import backend_empty_cache, require_torch_gpu, slow, torch_device
@require_torch_gpu
@slow
class QuantCompileTests(unittest.TestCase):
quantization_config = None
def setUp(self):
super().setUp()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def tearDown(self):
super().tearDown()
gc.collect()
backend_empty_cache(torch_device)
torch.compiler.reset()
def _init_pipeline(self, quantization_config, torch_dtype):
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-3-medium-diffusers",
quantization_config=quantization_config,
torch_dtype=torch_dtype,
)
return pipe
def _test_torch_compile(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype).to("cuda")
        # compile with fullgraph=True to ensure the whole graph is captured
pipe.transformer.compile(fullgraph=True)
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_cpu_offload(self, quantization_config, torch_dtype=torch.bfloat16):
pipe = self._init_pipeline(quantization_config, torch_dtype)
pipe.enable_model_cpu_offload()
pipe.transformer.compile()
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
def _test_torch_compile_with_group_offload(self, quantization_config, torch_dtype=torch.bfloat16):
torch._dynamo.config.cache_size_limit = 10000
pipe = self._init_pipeline(quantization_config, torch_dtype)
group_offload_kwargs = {
"onload_device": torch.device("cuda"),
"offload_device": torch.device("cpu"),
"offload_type": "leaf_level",
"use_stream": True,
"non_blocking": True,
}
pipe.transformer.enable_group_offload(**group_offload_kwargs)
pipe.transformer.compile()
for name, component in pipe.components.items():
if name != "transformer" and isinstance(component, torch.nn.Module):
if torch.device(component.device).type == "cpu":
component.to("cuda")
for _ in range(2):
# small resolutions to ensure speedy execution.
pipe("a dog", num_inference_steps=3, max_sequence_length=16, height=256, width=256)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import PlaywrightURLLoader
from langchain_community.document_loaders.url_playwright import (
PlaywrightEvaluator,
UnstructuredHtmlEvaluator,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlaywrightEvaluator": "langchain_community.document_loaders.url_playwright",
"UnstructuredHtmlEvaluator": "langchain_community.document_loaders.url_playwright",
"PlaywrightURLLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PlaywrightEvaluator",
"PlaywrightURLLoader",
"UnstructuredHtmlEvaluator",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import PlaywrightURLLoader
from langchain_community.document_loaders.url_playwright import (
PlaywrightEvaluator,
UnstructuredHtmlEvaluator,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlaywrightEvaluator": "langchain_community.document_loaders.url_playwright",
"UnstructuredHtmlEvaluator": "langchain_community.document_loaders.url_playwright",
"PlaywrightURLLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PlaywrightEvaluator",
"UnstructuredHtmlEvaluator",
"PlaywrightURLLoader",
]
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
The shards indices order is preserved: e.g. all the first shards are given the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
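# Worked example (illustration only; the file names are made up): splitting
# gen_kwargs containing one list of 4 shards across 2 jobs keeps non-list
# values shared and slices the list per job.
def _example_split():
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt", "d.txt"], "encoding": "utf-8"}
    return _split_gen_kwargs(gen_kwargs, max_num_jobs=2)
    # -> [{"files": ["a.txt", "b.txt"], "encoding": "utf-8"},
    #     {"files": ["c.txt", "d.txt"], "encoding": "utf-8"}]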
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
"""Return the number of possible shards according to the input gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
# until we decide how to define sharding without ambiguity for users
lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
if len(set(lists_lengths.values())) > 1:
raise RuntimeError(
(
"Sharding is ambiguous for this dataset: "
+ "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
+ "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
+ "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
+ "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
)
)
max_length = max(lists_lengths.values(), default=0)
return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
"""
Get the range of shard indices per job.
If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard.
The shards indices order is preserved: e.g. all the first shards are given the first job.
Moreover all the jobs are given approximately the same number of shards.
Example:
```python
>>> _distribute_shards(2, max_num_jobs=4)
[range(0, 1), range(1, 2)]
>>> _distribute_shards(10, max_num_jobs=3)
[range(0, 4), range(4, 7), range(7, 10)]
```
"""
shards_indices_per_group = []
for group_idx in range(max_num_jobs):
num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
shard_indices = range(start, start + num_shards_to_add)
shards_indices_per_group.append(shard_indices)
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
"""Split the gen_kwargs into `max_num_job` gen_kwargs"""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
if num_shards == 1:
return [dict(gen_kwargs)]
else:
shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(value, list)
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(shard_indices_per_group))
]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], list)
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
"""Return a shuffled copy of the input gen_kwargs"""
# We must shuffle all the lists, and lists of the same size must have the same shuffling.
# This way entangled lists of (shard, shard_metadata) are still in the right order.
# First, let's generate the shuffled indices per list size
list_sizes = set(len(value) for value in gen_kwargs.values() if isinstance(value, list))
indices_per_size = {}
for size in list_sizes:
indices_per_size[size] = list(range(size))
rng.shuffle(indices_per_size[size])
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
shuffled_kwargs = dict(gen_kwargs)
for key, value in shuffled_kwargs.items():
if isinstance(value, list):
shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
return shuffled_kwargs
|
from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayWeaviate``"""
def _getitem(self, wid: str) -> 'Document':
"""Helper method for getting item with weaviate as storage
:param wid: weaviate id
:raises KeyError: raise error when weaviate id does not exist in storage
:return: Document
"""
try:
resp = self._client.data_object.get_by_id(wid, with_vector=True)
return Document.from_base64(
resp['properties']['_serialized'], **self._serialize_config
)
except Exception as ex:
raise KeyError(wid) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from weaviate
"""
return self._getitem(self._map_id(_id))
def _set_doc_by_id(self, _id: str, value: 'Document', flush: bool = True):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
payload = self._doc2weaviate_create_payload(value)
self._client.batch.add_data_object(**payload)
if flush:
self._client.batch.flush()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc, flush=False)
self._client.batch.flush()
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
wid = self._map_id(_id)
if self._client.data_object.exists(wid):
self._client.data_object.delete(wid)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
if self._class_name:
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
self._load_or_create_weaviate_schema()
def _load_offset2ids(self):
ids, self._offset2ids_wid = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
from typing import Iterable, Dict
from ..base.getsetdel import BaseGetSetDelMixin
from ..base.helper import Offset2ID
from .... import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayWeaviate``"""
def _getitem(self, wid: str) -> 'Document':
"""Helper method for getting item with weaviate as storage
:param wid: weaviate id
:raises KeyError: raise error when weaviate id does not exist in storage
:return: Document
"""
try:
resp = self._client.data_object.get_by_id(wid, with_vector=True)
return Document.from_base64(
resp['properties']['_serialized'], **self._serialize_config
)
except Exception as ex:
raise KeyError(wid) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from weaviate
"""
return self._getitem(self._map_id(_id))
def _set_doc_by_id(self, _id: str, value: 'Document', flush: bool = True):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
payload = self._doc2weaviate_create_payload(value)
self._client.batch.add_data_object(**payload)
if flush:
self._client.batch.flush()
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc, flush=False)
self._client.batch.flush()
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
wid = self._map_id(_id)
if self._client.data_object.exists(wid):
self._client.data_object.delete(wid)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
if self._class_name:
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
self._load_or_create_weaviate_schema()
def _load_offset2ids(self):
ids, self._offset2ids_wid = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.activations import deserialize
from keras.src.activations import get
from keras.src.activations import serialize
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_silu as hard_swish
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import silu as swish
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3 model."""
import unittest
from io import BytesIO
import requests
from PIL import Image
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch_accelerator,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import ShieldGemma2ForImageClassification, ShieldGemma2Processor
@slow
@require_torch_accelerator
# @require_read_token
class ShieldGemma2IntegrationTest(unittest.TestCase):
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model(self):
model_id = "google/shieldgemma-2-4b-it"
processor = ShieldGemma2Processor.from_pretrained(model_id, padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
response = requests.get(url)
image = Image.open(BytesIO(response.content))
model = ShieldGemma2ForImageClassification.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
).to(torch_device)
inputs = processor(images=[image]).to(torch_device)
output = model(**inputs)
self.assertEqual(len(output.probabilities), 3)
for element in output.probabilities:
self.assertEqual(len(element), 2)
|
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing suite for the PyTorch Gemma3 model."""
import unittest
from io import BytesIO
import requests
from PIL import Image
from transformers import is_torch_available
from transformers.testing_utils import (
cleanup,
require_torch_gpu,
slow,
torch_device,
)
if is_torch_available():
import torch
from transformers import ShieldGemma2ForImageClassification, ShieldGemma2Processor
@slow
@require_torch_gpu
# @require_read_token
class ShieldGemma2IntegrationTest(unittest.TestCase):
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model(self):
model_id = "google/shieldgemma-2-4b-it"
processor = ShieldGemma2Processor.from_pretrained(model_id, padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
response = requests.get(url)
image = Image.open(BytesIO(response.content))
model = ShieldGemma2ForImageClassification.from_pretrained(
model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
).to(torch_device)
inputs = processor(images=[image]).to(torch_device)
output = model(**inputs)
self.assertEqual(len(output.probabilities), 3)
for element in output.probabilities:
self.assertEqual(len(element), 2)
|
import socket
from dataclasses import asdict
import numpy as np
import pytest
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
from xgboost.collective import Config
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast("test1234", 0)
assert str(ret) == "test1234"
reduced = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(reduced, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.no_loky())
def test_rabit_communicator() -> None:
from loky import get_reusable_executor
world_size = 2
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
tracker.start()
workers = []
with get_reusable_executor(max_workers=world_size) as pool:
for _ in range(world_size):
worker = pool.submit(
run_rabit_worker, rabit_env=tracker.worker_args(), world_size=world_size
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def run_federated_worker(port: int, world_size: int, rank: int) -> int:
with xgb.collective.CommunicatorContext(
dmlc_communicator="federated",
federated_server_address=f"localhost:{port}",
federated_world_size=world_size,
federated_rank=rank,
):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f"rank:{rank}"
bret = xgb.collective.broadcast("test1234", 0)
assert str(bret) == "test1234"
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(aret, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.skip_win())
@pytest.mark.skipif(**tm.no_loky())
def test_federated_communicator() -> None:
from loky import get_reusable_executor
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
with get_reusable_executor(max_workers=world_size + 1) as pool:
kwargs = {"port": port, "n_workers": world_size, "blocking": False}
tracker = pool.submit(federated.run_federated_server, **kwargs)
if not tracker.running():
raise RuntimeError("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = pool.submit(
run_federated_worker, port=port, world_size=world_size, rank=rank
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def test_config_serialization() -> None:
cfg = Config(retry=1, timeout=2, tracker_host_ip="127.0.0.1", tracker_port=None)
cfg1 = Config(**asdict(cfg))
assert cfg == cfg1
|
import socket
from dataclasses import asdict
import numpy as np
import pytest
from loky import get_reusable_executor
import xgboost as xgb
from xgboost import RabitTracker, build_info, federated
from xgboost import testing as tm
from xgboost.collective import Config
def run_rabit_worker(rabit_env: dict, world_size: int) -> int:
with xgb.collective.CommunicatorContext(**rabit_env):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == socket.gethostname()
ret = xgb.collective.broadcast("test1234", 0)
assert str(ret) == "test1234"
reduced = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(reduced, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.no_loky())
def test_rabit_communicator() -> None:
world_size = 2
tracker = RabitTracker(host_ip="127.0.0.1", n_workers=world_size)
tracker.start()
workers = []
with get_reusable_executor(max_workers=world_size) as pool:
for _ in range(world_size):
worker = pool.submit(
run_rabit_worker, rabit_env=tracker.worker_args(), world_size=world_size
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def run_federated_worker(port: int, world_size: int, rank: int) -> int:
with xgb.collective.CommunicatorContext(
dmlc_communicator="federated",
federated_server_address=f"localhost:{port}",
federated_world_size=world_size,
federated_rank=rank,
):
assert xgb.collective.get_world_size() == world_size
assert xgb.collective.is_distributed()
assert xgb.collective.get_processor_name() == f"rank:{rank}"
bret = xgb.collective.broadcast("test1234", 0)
assert str(bret) == "test1234"
aret = xgb.collective.allreduce(np.asarray([1, 2, 3]), xgb.collective.Op.SUM)
assert np.array_equal(aret, np.asarray([2, 4, 6]))
return 0
@pytest.mark.skipif(**tm.skip_win())
@pytest.mark.skipif(**tm.no_loky())
def test_federated_communicator() -> None:
if not build_info()["USE_FEDERATED"]:
pytest.skip("XGBoost not built with federated learning enabled")
port = 9091
world_size = 2
with get_reusable_executor(max_workers=world_size + 1) as pool:
kwargs = {"port": port, "n_workers": world_size, "blocking": False}
tracker = pool.submit(federated.run_federated_server, **kwargs)
if not tracker.running():
raise RuntimeError("Error starting Federated Learning server")
workers = []
for rank in range(world_size):
worker = pool.submit(
run_federated_worker, port=port, world_size=world_size, rank=rank
)
workers.append(worker)
for worker in workers:
assert worker.result() == 0
def test_config_serialization() -> None:
cfg = Config(retry=1, timeout=2, tracker_host_ip="127.0.0.1", tracker_port=None)
cfg1 = Config(**asdict(cfg))
assert cfg == cfg1
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# since Focal Loss is not supported on CPU
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# since Focal Loss is not supported on CPU
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from torchaudio_unittest.prototype.rnnt_test_impl import ConformerRNNTTestImpl
@skipIfNoCuda
class ConformerRNNTFloat32GPUTest(ConformerRNNTTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConformerRNNTFloat64GPUTest(ConformerRNNTTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from torchaudio_unittest.prototype.rnnt_test_impl import ConformerRNNTTestImpl
@skipIfNoCuda
class ConformerRNNTFloat32GPUTest(ConformerRNNTTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class ConformerRNNTFloat64GPUTest(ConformerRNNTTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py'
]
default_hooks = dict(
logger=dict(type='LoggerHook', interval=1),
visualization=dict(type='TrackVisualizationHook', draw=False))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# custom hooks
custom_hooks = [
# Synchronize model buffers such as running_mean and running_var in BN
# at the end of each epoch
dict(type='SyncBuffersHook')
]
detector = _base_.model
detector.pop('data_preprocessor')
detector.rpn_head.bbox_coder.update(dict(clip_border=False))
detector.roi_head.bbox_head.update(dict(num_classes=1))
detector.roi_head.bbox_head.bbox_coder.update(dict(clip_border=False))
detector['init_cfg'] = dict(
type='Pretrained',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/'
'faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth')
del _base_.model
model = dict(
type='DeepSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=None,
backbone=dict(
type='mmcls.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU')),
init_cfg=dict(
type='Pretrained',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth' # noqa: E501
)),
tracker=dict(
type='SORTTracker',
motion=dict(type='KalmanFilter', center_only=False),
obj_score_thr=0.5,
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0),
match_iou_thr=0.5,
momentums=None,
num_tentatives=2,
num_frames_retain=100))
train_dataloader = None
train_cfg = None
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/mot_challenge.py', '../_base_/default_runtime.py'
]
default_hooks = dict(
logger=dict(type='LoggerHook', interval=1),
visualization=dict(type='TrackVisualizationHook', draw=False))
vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
type='TrackLocalVisualizer', vis_backends=vis_backends, name='visualizer')
# custom hooks
custom_hooks = [
# Synchronize model buffers such as running_mean and running_var in BN
# at the end of each epoch
dict(type='SyncBuffersHook')
]
detector = _base_.model
detector.pop('data_preprocessor')
detector.rpn_head.bbox_coder.update(dict(clip_border=False))
detector.roi_head.bbox_head.update(dict(num_classes=1))
detector.roi_head.bbox_head.bbox_coder.update(dict(clip_border=False))
detector['init_cfg'] = dict(
type='Pretrained',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/'
'faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth')
del _base_.model
model = dict(
type='DeepSORT',
data_preprocessor=dict(
type='TrackDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
rgb_to_bgr=False,
pad_size_divisor=32),
detector=detector,
reid=dict(
type='BaseReID',
data_preprocessor=None,
backbone=dict(
type='mmcls.ResNet',
depth=50,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=2048,
fc_channels=1024,
out_channels=128,
num_classes=380,
loss_cls=dict(type='mmcls.CrossEntropyLoss', loss_weight=1.0),
loss_triplet=dict(type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU')),
init_cfg=dict(
type='Pretrained',
checkpoint= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth' # noqa: E501
)),
tracker=dict(
type='SORTTracker',
motion=dict(type='KalmanFilter', center_only=False),
obj_score_thr=0.5,
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0),
match_iou_thr=0.5,
momentums=None,
num_tentatives=2,
num_frames_retain=100))
train_dataloader = None
train_cfg = None
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
|
import os
import time
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
class MyImageDoc(ImageDoc):
embedding: NdArray = Field(dims=128)
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
|
import os
import time
import numpy as np
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.typing import NdArray
pytestmark = [pytest.mark.slow, pytest.mark.index]
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml_v7 = os.path.abspath(os.path.join(cur_dir, 'v7/docker-compose.yml'))
compose_yml_v8 = os.path.abspath(os.path.join(cur_dir, 'v8/docker-compose.yml'))
@pytest.fixture(scope='module', autouse=True)
def start_storage_v7():
os.system(f"docker-compose -f {compose_yml_v7} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v7} down --remove-orphans")
@pytest.fixture(scope='module', autouse=True)
def start_storage_v8():
os.system(f"docker-compose -f {compose_yml_v8} up -d --remove-orphans")
_wait_for_es()
yield
os.system(f"docker-compose -f {compose_yml_v8} down --remove-orphans")
def _wait_for_es():
from elasticsearch import Elasticsearch
es = Elasticsearch(hosts='http://localhost:9200/')
while not es.ping():
time.sleep(0.5)
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dims=1000)
class FlatDoc(BaseDoc):
tens_one: NdArray = Field(dims=10)
tens_two: NdArray = Field(dims=50)
class NestedDoc(BaseDoc):
d: SimpleDoc
class DeepNestedDoc(BaseDoc):
d: NestedDoc
@pytest.fixture(scope='function')
def ten_simple_docs():
return [SimpleDoc(tens=np.random.randn(10)) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_flat_docs():
return [
FlatDoc(tens_one=np.random.randn(10), tens_two=np.random.randn(50))
for _ in range(10)
]
@pytest.fixture(scope='function')
def ten_nested_docs():
return [NestedDoc(d=SimpleDoc(tens=np.random.randn(10))) for _ in range(10)]
@pytest.fixture(scope='function')
def ten_deep_nested_docs():
return [
DeepNestedDoc(d=NestedDoc(d=SimpleDoc(tens=np.random.randn(10))))
for _ in range(10)
]
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
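# GaussianDropout multiplies by 1-centered Gaussian noise with stddev sqrt(rate / (1 - rate)), hence the expected std below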
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
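# GaussianDropout multiplies by 1-centered Gaussian noise with stddev sqrt(rate / (1 - rate)), hence the expected std below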
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.documents import TextDoc
def test_text_document_init():
text = TextDoc('hello world')
assert text.text == 'hello world'
assert text == 'hello world'
text = TextDoc(text='hello world')
assert text.text == 'hello world'
assert text == 'hello world'
text = TextDoc()
assert text is not None
assert text.text is None
|
from docarray.documents import TextDoc
def test_text_document_init():
text = TextDoc('hello world')
assert text.text == 'hello world'
assert text == 'hello world'
text = TextDoc(text='hello world')
assert text.text == 'hello world'
assert text == 'hello world'
text = TextDoc()
assert text is not None
assert text.text is None
|
import io
import json
import logging
import os
import tempfile
from typing import IO
import torch
from torch._inductor import config
from torch._inductor.cpp_builder import BuildOptionsBase, CppBuilder
from torch.export.pt2_archive._package import (
AOTI_FILES,
AOTICompiledModel,
load_pt2,
package_pt2,
)
from torch.types import FileLike
log = logging.getLogger(__name__)
def compile_so(aoti_dir: str, aoti_files: list[str], so_path: str) -> str:
def get_aoti_file_with_suffix(suffix: str) -> str:
for file in aoti_files:
if file.endswith(suffix):
return file
raise RuntimeError(f"Unable to find file with suffix {suffix}")
# Compile all the files into a .so
cpp_file = os.path.join(aoti_dir, get_aoti_file_with_suffix(".cpp"))
consts_o = os.path.join(aoti_dir, get_aoti_file_with_suffix(".o"))
file_name = os.path.splitext(cpp_file)[0]
# Parse compile flags and build the .o file
with open(file_name + "_compile_flags.json") as f:
compile_flags = json.load(f)
compile_options = BuildOptionsBase(
**compile_flags, use_relative_path=config.is_fbcode()
)
object_builder = CppBuilder(
name=file_name,
sources=cpp_file,
BuildOption=compile_options,
)
output_o = object_builder.get_target_file_path()
object_builder.build()
# Parse linker flags and build the .so file
with open(file_name + "_linker_flags.json") as f:
linker_flags = json.load(f)
linker_options = BuildOptionsBase(
**linker_flags, use_relative_path=config.is_fbcode()
)
so_builder = CppBuilder(
name=os.path.split(so_path)[-1],
sources=[output_o, consts_o],
BuildOption=linker_options,
output_dir=so_path,
)
output_so = so_builder.get_target_file_path()
so_builder.build()
# mmapped weights
serialized_weights_filename = file_name + "_serialized_weights.bin"
if serialized_weights_filename in aoti_files:
with open(serialized_weights_filename, "rb") as f_weights:
serialized_weights = f_weights.read()
with open(output_so, "a+b") as f_so:
so_size = f_so.tell()
# Page align the weights
f_so.write(b" " * (16384 - so_size % 16384))
f_so.write(serialized_weights)
return output_so
def package_aoti(
archive_file: FileLike,
aoti_files: AOTI_FILES,
) -> FileLike:
"""
Saves the AOTInductor generated files to the PT2Archive format.
Args:
archive_file: The file name to save the package to.
aoti_files: This can either be a singular path to a directory containing
the AOTInductor files, or a dictionary mapping the model name to the
path to its AOTInductor generated files.
"""
return package_pt2(
archive_file,
aoti_files=aoti_files,
)
def load_package(
path: FileLike,
model_name: str = "model",
run_single_threaded: bool = False,
num_runners: int = 1,
device_index: int = -1,
) -> AOTICompiledModel: # type: ignore[type-arg]
try:
pt2_contents = load_pt2(
path,
run_single_threaded=run_single_threaded,
num_runners=num_runners,
device_index=device_index,
)
if model_name not in pt2_contents.aoti_runners:
raise RuntimeError(f"Model {model_name} not found in package")
return pt2_contents.aoti_runners[model_name]
except RuntimeError:
log.warning("Loading outdated pt2 file. Please regenerate your package.")
if isinstance(path, (io.IOBase, IO)):
with tempfile.NamedTemporaryFile(suffix=".pt2") as f:
# TODO(angelayi): We shouldn't need to do this -- miniz should
# handle reading the buffer. This is just a temporary workaround
path.seek(0)
f.write(path.read())
log.debug("Writing buffer to tmp file located at %s.", f.name)
loader = torch._C._aoti.AOTIModelPackageLoader(
f.name, model_name, run_single_threaded, num_runners, device_index
)
return AOTICompiledModel(loader)
path = os.fspath(path) # AOTIModelPackageLoader expects (str, str)
loader = torch._C._aoti.AOTIModelPackageLoader(
path, model_name, run_single_threaded, num_runners, device_index
)
return AOTICompiledModel(loader)
|
import io
import json
import logging
import os
import tempfile
from typing import IO, Union
import torch
from torch._inductor import config
from torch._inductor.cpp_builder import BuildOptionsBase, CppBuilder
from torch.export.pt2_archive._package import AOTICompiledModel, load_pt2, package_pt2
from torch.types import FileLike
log = logging.getLogger(__name__)
def compile_so(aoti_dir: str, aoti_files: list[str], so_path: str) -> str:
def get_aoti_file_with_suffix(suffix: str) -> str:
for file in aoti_files:
if file.endswith(suffix):
return file
raise RuntimeError(f"Unable to find file with suffix {suffix}")
# Compile all the files into a .so
cpp_file = os.path.join(aoti_dir, get_aoti_file_with_suffix(".cpp"))
consts_o = os.path.join(aoti_dir, get_aoti_file_with_suffix(".o"))
file_name = os.path.splitext(cpp_file)[0]
# Parse compile flags and build the .o file
with open(file_name + "_compile_flags.json") as f:
compile_flags = json.load(f)
compile_options = BuildOptionsBase(
**compile_flags, use_relative_path=config.is_fbcode()
)
object_builder = CppBuilder(
name=file_name,
sources=cpp_file,
BuildOption=compile_options,
)
output_o = object_builder.get_target_file_path()
object_builder.build()
# Parse linker flags and build the .so file
with open(file_name + "_linker_flags.json") as f:
linker_flags = json.load(f)
linker_options = BuildOptionsBase(
**linker_flags, use_relative_path=config.is_fbcode()
)
so_builder = CppBuilder(
name=os.path.split(so_path)[-1],
sources=[output_o, consts_o],
BuildOption=linker_options,
output_dir=so_path,
)
output_so = so_builder.get_target_file_path()
so_builder.build()
# mmapped weights
serialized_weights_filename = file_name + "_serialized_weights.bin"
if serialized_weights_filename in aoti_files:
with open(serialized_weights_filename, "rb") as f_weights:
serialized_weights = f_weights.read()
with open(output_so, "a+b") as f_so:
so_size = f_so.tell()
# Page align the weights
f_so.write(b" " * (16384 - so_size % 16384))
f_so.write(serialized_weights)
return output_so
def package_aoti(
archive_file: FileLike,
aoti_files: Union[list[str], dict[str, list[str]]],
) -> FileLike:
"""
Saves the AOTInductor generated files to the PT2Archive format.
Args:
archive_file: The file name to save the package to.
aoti_files: This can either be a singular path to a directory containing
the AOTInductor files, or a dictionary mapping the model name to the
path to its AOTInductor generated files.
"""
return package_pt2(archive_file, aoti_files=aoti_files)
def load_package(
path: FileLike,
model_name: str = "model",
run_single_threaded: bool = False,
num_runners: int = 1,
device_index: int = -1,
) -> AOTICompiledModel: # type: ignore[type-arg]
try:
pt2_contents = load_pt2(
path,
run_single_threaded=run_single_threaded,
num_runners=num_runners,
device_index=device_index,
)
if model_name not in pt2_contents.aoti_runners:
raise RuntimeError(f"Model {model_name} not found in package")
return pt2_contents.aoti_runners[model_name]
except RuntimeError:
log.warning("Loading outdated pt2 file. Please regenerate your package.")
if isinstance(path, (io.IOBase, IO)):
with tempfile.NamedTemporaryFile(suffix=".pt2") as f:
# TODO(angelayi): We shouldn't need to do this -- miniz should
# handle reading the buffer. This is just a temporary workaround
path.seek(0)
f.write(path.read())
log.debug("Writing buffer to tmp file located at %s.", f.name)
loader = torch._C._aoti.AOTIModelPackageLoader(
f.name, model_name, run_single_threaded, num_runners, device_index
)
return AOTICompiledModel(loader)
path = os.fspath(path) # AOTIModelPackageLoader expects (str, str)
loader = torch._C._aoti.AOTIModelPackageLoader(
path, model_name, run_single_threaded, num_runners, device_index
)
return AOTICompiledModel(loader)
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import InstanceData, PixelData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
from mmengine.data import BaseDataElement as PixelData
from mmengine.data import InstanceData
from mmdet.datasets.transforms import PackDetInputs
from mmdet.structures import DetDataSample
from mmdet.structures.mask import BitmapMasks
class TestPackDetInputs(unittest.TestCase):
def setUp(self):
"""Setup the model and optimizer which are used in every test method.
TestCase calls functions in this order: setUp() -> testMethod() ->
tearDown() -> cleanUp()
"""
data_prefix = osp.join(osp.dirname(__file__), '../../data')
img_path = osp.join(data_prefix, 'color.jpg')
rng = np.random.RandomState(0)
self.results1 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
'proposals': rng.rand(2, 4)
}
self.results2 = {
'img_id': 1,
'img_path': img_path,
'ori_shape': (300, 400),
'img_shape': (600, 800),
'scale_factor': 2.0,
'flip': False,
'img': rng.rand(300, 400),
'gt_seg_map': rng.rand(300, 400),
'gt_masks':
BitmapMasks(rng.rand(3, 300, 400), height=300, width=400),
'gt_bboxes_labels': rng.rand(3, ),
'proposals': rng.rand(2, 4)
}
self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'scale_factor',
'flip')
def test_transform(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results1))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 2)
self.assertEqual(len(results['data_sample'].ignored_instances), 1)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_transform_without_ignore(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
results = transform(copy.deepcopy(self.results2))
self.assertIn('data_sample', results)
self.assertIsInstance(results['data_sample'], DetDataSample)
self.assertIsInstance(results['data_sample'].gt_instances,
InstanceData)
self.assertIsInstance(results['data_sample'].ignored_instances,
InstanceData)
self.assertEqual(len(results['data_sample'].gt_instances), 3)
self.assertEqual(len(results['data_sample'].ignored_instances), 0)
self.assertIsInstance(results['data_sample'].gt_sem_seg, PixelData)
self.assertIsInstance(results['data_sample'].proposals, InstanceData)
self.assertEqual(len(results['data_sample'].proposals), 2)
self.assertIsInstance(results['data_sample'].proposals.bboxes,
np.ndarray)
def test_repr(self):
transform = PackDetInputs(meta_keys=self.meta_keys)
self.assertEqual(
repr(transform), f'PackDetInputs(meta_keys={self.meta_keys})')
|
from docarray import Document, DocumentArray
import pytest
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
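# 5 unique docs from the first extend plus the new ids r5 and r6; re-added r0, r2 and r4 are ignored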
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
def test_add_skip_wrong_data_type_and_fix_offset(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'index_name': 'test_add_skip_wrong_data_type_and_fix_offset',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=1000),
Document(id='1', price=20000),
Document(id='2', price=103000),
]
)
with pytest.raises(IndexError):
with elastic_doc:
elastic_doc.extend(
[
Document(id='0', price=10000),
Document(id='1', price=20000),
Document(id='3', price=30000),
Document(id='4', price=100000000000), # overflow int32
Document(id='5', price=2000),
Document(id='6', price=100000000000), # overflow int32
Document(id='7', price=30000),
]
)
expected_ids = ['0', '1', '2', '3', '5', '7']
assert len(elastic_doc) == 6
assert len(elastic_doc[:, 'id']) == 6
assert elastic_doc[:, 'id'] == expected_ids
assert elastic_doc._offset2ids.ids == expected_ids
|
from docarray import Document, DocumentArray
def test_add_ignore_existing_doc_id(start_storage):
elastic_doc = DocumentArray(
storage='elasticsearch',
config={
'n_dim': 3,
'columns': [('price', 'int')],
'distance': 'l2_norm',
'index_name': 'test_add_ignore_existing_doc_id',
},
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r1', embedding=[1, 1, 1]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r3', embedding=[3, 3, 3]),
Document(id='r4', embedding=[4, 4, 4]),
]
)
with elastic_doc:
elastic_doc.extend(
[
Document(id='r0', embedding=[0, 0, 0]),
Document(id='r2', embedding=[2, 2, 2]),
Document(id='r4', embedding=[4, 4, 4]),
Document(id='r5', embedding=[2, 2, 2]),
Document(id='r6', embedding=[4, 4, 4]),
]
)
indexed_offset_count = elastic_doc._client.count(
index=elastic_doc._index_name_offset2id
)['count']
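# 5 unique docs from the first extend plus the new ids r5 and r6; re-added r0, r2 and r4 are ignored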
assert len(elastic_doc) == len(elastic_doc[:, 'embedding'])
assert len(elastic_doc) == indexed_offset_count
assert len(elastic_doc[:, 'embedding']) == 7
|
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
@pytest.mark.integration
def test_available_models() -> None:
models = NVIDIAMultiModal().available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import pytest
from llama_index.multi_modal_llms.nvidia import NVIDIAMultiModal
@pytest.mark.integration()
def test_available_models() -> None:
models = NVIDIAMultiModal().available_models
assert models
assert isinstance(models, list)
assert all(isinstance(model.id, str) for model in models)
|
import csv
import os
from . import InputExample
class TripletReader(object):
"""Reads in the a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(
self,
dataset_folder,
s1_col_idx=0,
s2_col_idx=1,
s3_col_idx=2,
has_header=False,
delimiter="\t",
quoting=csv.QUOTE_NONE,
):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
data = csv.reader(
open(os.path.join(self.dataset_folder, filename), encoding="utf-8"),
delimiter=self.delimiter,
quoting=self.quoting,
)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
from . import InputExample
import csv
import gzip
import os
class TripletReader(object):
"""
Reads in a Triplet Dataset: Each line contains (at least) 3 columns, one anchor column (s1),
one positive example (s2) and one negative example (s3)
"""
def __init__(self, dataset_folder, s1_col_idx=0, s2_col_idx=1, s3_col_idx=2, has_header=False, delimiter="\t",
quoting=csv.QUOTE_NONE):
self.dataset_folder = dataset_folder
self.s1_col_idx = s1_col_idx
self.s2_col_idx = s2_col_idx
self.s3_col_idx = s3_col_idx
self.has_header = has_header
self.delimiter = delimiter
self.quoting = quoting
def get_examples(self, filename, max_examples=0):
"""
"""
data = csv.reader(open(os.path.join(self.dataset_folder, filename), encoding="utf-8"), delimiter=self.delimiter,
quoting=self.quoting)
examples = []
if self.has_header:
next(data)
for id, row in enumerate(data):
s1 = row[self.s1_col_idx]
s2 = row[self.s2_col_idx]
s3 = row[self.s3_col_idx]
examples.append(InputExample(texts=[s1, s2, s3]))
if max_examples > 0 and len(examples) >= max_examples:
break
return examples
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.saved_model import ExportArchive
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.export.export_lib import ExportArchive
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import NdArray
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_ndarray = np.zeros((3, 224, 224))
NdArray._flush_tensor_to_proto(nd_proto, value=original_ndarray)
nested_item = NodeProto(ndarray=nd_proto)
tensor = NdArray.from_protobuf(nested_item.ndarray)
assert (tensor == original_ndarray).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_ndarray = np.zeros((3, 224, 224))
NdArray._flush_tensor_to_proto(nd_proto, value=original_ndarray)
nested_item2 = NodeProto(ndarray=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
import numpy as np
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
def test_nested_item_proto():
NodeProto(text='hello')
NodeProto(nested=DocumentProto())
def test_nested_optional_item_proto():
NodeProto()
def test_ndarray():
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item = NodeProto(tensor=nd_proto)
tensor = Tensor.from_protobuf(nested_item.tensor)
assert (tensor == original_tensor).all()
def test_document_proto_set():
data = {}
nested_item1 = NodeProto(text='hello')
nd_proto = NdArrayProto()
original_tensor = np.zeros((3, 224, 224))
Tensor._flush_tensor_to_proto(nd_proto, value=original_tensor)
nested_item2 = NodeProto(tensor=nd_proto)
data['a'] = nested_item1
data['b'] = nested_item2
DocumentProto(data=data)
|
from pathlib import Path
from llama_index.core.bridge.pydantic import AnyUrl
from llama_index.core.schema import MediaResource
def test_defaults():
m = MediaResource()
assert m.data is None
assert m.embeddings is None
assert m.mimetype is None
assert m.path is None
assert m.url is None
def test_mimetype():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px.encode("utf-8"), mimetype=None)
assert m.mimetype == "image/png"
def test_mimetype_from_path():
m = MediaResource(path="my-image.jpg", mimetype=None)
assert m.mimetype == "image/jpeg"
def test_mimetype_prioritizes_data():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px.encode("utf-8"), mimetype=None, path="my_image.jpg")
assert m.mimetype == "image/png"
def test_hash():
assert (
MediaResource(
data=b"test bytes",
path=Path("foo/bar/baz"),
url=AnyUrl("http://example.com"),
text="some text",
).hash
== "04414a5f03ad7fa055229b4d3690d47427cb0b65bc7eb8f770d1ecbd54ab4909"
)
assert MediaResource().hash == ""
|
from pathlib import Path
from llama_index.core.bridge.pydantic import AnyUrl
from llama_index.core.schema import MediaResource
def test_defaults():
m = MediaResource()
assert m.data is None
assert m.embeddings is None
assert m.mimetype is None
assert m.path is None
assert m.url is None
def test_mimetype():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px.encode("utf-8"), mimetype=None)
assert m.mimetype == "image/png"
def test_hash():
assert (
MediaResource(
data=b"test bytes",
path=Path("foo/bar/baz"),
url=AnyUrl("http://example.com"),
text="some text",
).hash
== "04414a5f03ad7fa055229b4d3690d47427cb0b65bc7eb8f770d1ecbd54ab4909"
)
assert MediaResource().hash == ""
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
pretrain_img_size=384,
embed_dims=128,
depths=depths,
num_heads=[4, 8, 16, 32],
window_size=12,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(in_channels=[128, 256, 512, 1024]))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optim_wrapper = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
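# Illustrative sketch only (not part of the original config, and not meant to be
# kept in a real config file): a rough model of how a paramwise constructor could
# map a parameter name to the multipliers in `custom_keys`. The real matching
# logic in the training framework may differ; this simply picks the longest key
# that is a prefix of the parameter name.
def _resolve_multipliers(param_name, keys=custom_keys):
    matches = [k for k in keys if param_name.startswith(k)]
    if not matches:
        return dict(lr_mult=1.0, decay_mult=1.0)
    return keys[max(matches, key=len)]

# e.g. _resolve_multipliers('backbone.patch_embed.norm.weight')
# -> dict(lr_mult=0.1, decay_mult=0.0)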
|
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth' # noqa
depths = [2, 2, 18, 2]
model = dict(
backbone=dict(
pretrain_img_size=384,
embed_dims=128,
depths=depths,
num_heads=[4, 8, 16, 32],
window_size=12,
init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
panoptic_head=dict(in_channels=[128, 256, 512, 1024]))
# set all layers in backbone to lr_mult=0.1
# set all norm layers, position_embedding,
# query_embedding, level_embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
'backbone': dict(lr_mult=0.1, decay_mult=1.0),
'backbone.patch_embed.norm': backbone_norm_multi,
'backbone.norm': backbone_norm_multi,
'absolute_pos_embed': backbone_embed_multi,
'relative_position_bias_table': backbone_embed_multi,
'query_embed': embed_multi,
'query_feat': embed_multi,
'level_embed': embed_multi
}
custom_keys.update({
f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
for stage_id, num_blocks in enumerate(depths)
for block_id in range(num_blocks)
})
custom_keys.update({
f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from jina import Flow
from PIL import Image
from ...pdf_segmenter import PDFSegmenter
def test_flow(test_dir, doc_generator_img_text, expected_text):
flow = Flow().add(uses=PDFSegmenter)
doc_array = doc_generator_img_text
for doc in doc_array:
with flow:
results = flow.post(on='/test', inputs=doc, return_results=True)
assert len(results[0].docs) == 1
chunks = results[0].docs[0].chunks
assert len(chunks) == 3
for idx, c in enumerate(chunks[:2]):
with Image.open(
os.path.join(test_dir, f'data/test_img_{idx}.jpg')
) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                    assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from PIL import Image
from jina import Flow
from ...pdf_segmenter import PDFSegmenter
def test_flow(test_dir, doc_generator_img_text, expected_text):
flow = Flow().add(uses=PDFSegmenter)
doc_array = doc_generator_img_text
for doc in doc_array:
with flow:
results = flow.post(
on='/test',
inputs=doc,
return_results=True
)
assert len(results[0].docs) == 1
chunks = results[0].docs[0].chunks
assert len(chunks) == 3
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                    assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.48"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""Top-level imports for LlamaIndex."""
__version__ = "0.12.47"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
# type: ignore
"""
Development Scripts for template packages
"""
from collections.abc import Sequence
from fastapi import FastAPI
from langserve import add_routes
from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
*,
config_keys: Sequence[str] = (),
playground_type: str = "default",
):
"""
Creates a demo server for the current template.
"""
app = FastAPI()
package_root = get_package_root()
pyproject = package_root / "pyproject.toml"
try:
package = get_langserve_export(pyproject)
mod = __import__(package["module"], fromlist=[package["attr"]])
chain = getattr(mod, package["attr"])
add_routes(
app,
chain,
config_keys=config_keys,
playground_type=playground_type,
)
except KeyError as e:
raise KeyError("Missing fields from pyproject.toml") from e
except ImportError as e:
raise ImportError("Could not import module defined in pyproject.toml") from e
return app
def create_demo_server_configurable():
return create_demo_server(config_keys=["configurable"])
def create_demo_server_chat():
return create_demo_server(playground_type="chat")
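# A minimal usage sketch (not part of the module above), assuming it is run from
# a template package directory containing a valid pyproject.toml with a
# `langserve` export section:
#
#     import uvicorn
#     app = create_demo_server()
#     uvicorn.run(app, host="127.0.0.1", port=8000)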
|
# type: ignore
"""
Development Scripts for template packages
"""
from typing import Sequence
from fastapi import FastAPI
from langserve import add_routes
from langchain_cli.utils.packages import get_langserve_export, get_package_root
def create_demo_server(
*,
config_keys: Sequence[str] = (),
playground_type: str = "default",
):
"""
Creates a demo server for the current template.
"""
app = FastAPI()
package_root = get_package_root()
pyproject = package_root / "pyproject.toml"
try:
package = get_langserve_export(pyproject)
mod = __import__(package["module"], fromlist=[package["attr"]])
chain = getattr(mod, package["attr"])
add_routes(
app,
chain,
config_keys=config_keys,
playground_type=playground_type,
)
except KeyError as e:
raise KeyError("Missing fields from pyproject.toml") from e
except ImportError as e:
raise ImportError("Could not import module defined in pyproject.toml") from e
return app
def create_demo_server_configurable():
return create_demo_server(config_keys=["configurable"])
def create_demo_server_chat():
return create_demo_server(playground_type="chat")
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def determine_gpus(cfg_name):
    gpus = 8
    gpus_per_node = 8
    if cfg_name.find('16x') >= 0:
        gpus = 16
    elif cfg_name.find('gn-head_4x4_1x_coco.py') >= 0 or \
            cfg_name.find('gn-head_4x4_2x_coco.py') >= 0:
        gpus = 4
        gpus_per_node = 4
    elif 'lad' in cfg_name:
        gpus = 2
        gpus_per_node = 2
    return gpus, gpus_per_node
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
'script) with the argument "--out" or "--run"')
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
work_dir = 'WORK_DIR=$2 '
commands.append(work_dir)
commands.append('\n')
    cpus_per_task = 'CPUS_PER_TASK=${3:-4} '
    commands.append(cpus_per_task)
commands.append('\n')
commands.append('\n')
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = '$WORK_DIR/' + fname
        gpus, gpus_per_node = determine_gpus(cfg)
        command_info = f'GPUS={gpus} GPUS_PER_NODE={gpus_per_node} ' \
                       f'CPUS_PER_TASK=$CPUS_PER_TASK {train_script_name} '
command_info += '$PARTITION '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
command_info += '--cfg-options default_hooks.checkpoint.' \
'max_keep_ckpts=1 '
command_info += '&'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model json to script')
parser.add_argument(
'txt_path', type=str, help='txt path output by benchmark_filter')
parser.add_argument(
'--partition',
type=str,
default='openmmlab',
help='slurm partition name')
parser.add_argument(
'--max-keep-ckpts',
type=int,
default=1,
help='The maximum checkpoints to keep')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
'script) with the argument "--out" or "--run"')
partition = args.partition # cluster name
root_name = './tools'
train_script_name = osp.join(root_name, 'slurm_train.sh')
    # suppress job stdout by redirecting it to /dev/null
stdout_cfg = '>/dev/null'
max_keep_ckpts = args.max_keep_ckpts
commands = []
with open(args.txt_path, 'r') as f:
model_cfgs = f.readlines()
for i, cfg in enumerate(model_cfgs):
cfg = cfg.strip()
if len(cfg) == 0:
continue
# print cfg name
echo_info = f'echo \'{cfg}\' &'
commands.append(echo_info)
commands.append('\n')
fname, _ = osp.splitext(osp.basename(cfg))
out_fname = osp.join(root_name, 'work_dir', fname)
# default setting
if cfg.find('16x') >= 0:
command_info = f'GPUS=16 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
elif cfg.find('gn-head_4x4_1x_coco.py') >= 0 or \
cfg.find('gn-head_4x4_2x_coco.py') >= 0:
command_info = f'GPUS=4 GPUS_PER_NODE=4 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
else:
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {train_script_name} '
command_info += f'{partition} '
command_info += f'{fname} '
command_info += f'{cfg} '
command_info += f'{out_fname} '
if max_keep_ckpts:
command_info += f'--cfg-options ' \
f'checkpoint_config.max_keep_ckpts=' \
f'{max_keep_ckpts}' + ' '
command_info += f'{stdout_cfg} &'
commands.append(command_info)
if i < len(model_cfgs):
commands.append('\n')
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class WeightedLayerPooling(nn.Module):
"""Token embeddings are weighted mean of their different hidden layer representations"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super(WeightedLayerPooling, self).__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: Dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :] # Start from 4th layers output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
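# A minimal, self-contained sketch (not part of the module above) showing the
# weighted mean over hidden layers that `forward` computes, using invented
# shapes and random values.
if __name__ == "__main__":
    layers, batch, tokens, dim = 13, 2, 4, 8  # 12 hidden layers + embedding layer
    fake_all_layers = [torch.randn(batch, tokens, dim) for _ in range(layers)]
    pooling = WeightedLayerPooling(word_embedding_dimension=dim, num_hidden_layers=12, layer_start=4)
    out = pooling({"all_layer_embeddings": fake_all_layers})
    # Layers 4..12 are averaged per token with the learnable per-layer weights.
    print(out["token_embeddings"].shape)  # torch.Size([2, 4, 8])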
|
import torch
from torch import Tensor
from torch import nn
from typing import Dict
import os
import json
class WeightedLayerPooling(nn.Module):
"""
    Token embeddings are a weighted mean of their different hidden layer representations
"""
def __init__(
self, word_embedding_dimension, num_hidden_layers: int = 12, layer_start: int = 4, layer_weights=None
):
super(WeightedLayerPooling, self).__init__()
self.config_keys = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]
self.word_embedding_dimension = word_embedding_dimension
self.layer_start = layer_start
self.num_hidden_layers = num_hidden_layers
self.layer_weights = (
layer_weights
if layer_weights is not None
else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
)
def forward(self, features: Dict[str, Tensor]):
ft_all_layers = features["all_layer_embeddings"]
all_layer_embedding = torch.stack(ft_all_layers)
all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :] # Start from 4th layers output
weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()
features.update({"token_embeddings": weighted_average})
return features
def get_word_embedding_dimension(self):
return self.word_embedding_dimension
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = WeightedLayerPooling(**config)
model.load_state_dict(
torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
)
return model
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .two_stage import TwoStageDetector
@MODELS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
            raise NotImplementedError
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
            raise NotImplementedError
|
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
from typing import Any, Literal, Union, cast
from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult
# TODO If used by two LLM runs in parallel this won't work as expected
class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
"""Callback handler that returns an async iterator."""
queue: asyncio.Queue[str]
done: asyncio.Event
@property
def always_verbose(self) -> bool:
return True
def __init__(self) -> None:
self.queue = asyncio.Queue()
self.done = asyncio.Event()
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if token is not None and token != "":
self.queue.put_nowait(token)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.done.set()
async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
self.done.set()
# TODO implement the other methods
async def aiter(self) -> AsyncIterator[str]:
while not self.queue.empty() or not self.done.is_set():
# Wait for the next token in the queue,
# but stop waiting if the done event is set
done, other = await asyncio.wait(
[
# NOTE: If you add other tasks here, update the code below,
# which assumes each set has exactly one task each
asyncio.ensure_future(self.queue.get()),
asyncio.ensure_future(self.done.wait()),
],
return_when=asyncio.FIRST_COMPLETED,
)
# Cancel the other task
if other:
other.pop().cancel()
# Extract the value of the first completed task
token_or_done = cast(Union[str, Literal[True]], done.pop().result())
# If the extracted value is the boolean True, the done event was set
if token_or_done is True:
break
# Otherwise, the extracted value is a token, which we yield
yield token_or_done
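# A minimal usage sketch (not part of the module above): one coroutine feeds
# tokens through the callback methods while another consumes `aiter()`. The
# prompt and token values are invented for illustration.
async def _demo_stream() -> None:
    handler = AsyncIteratorCallbackHandler()

    async def produce() -> None:
        await handler.on_llm_start({}, ["example prompt"])
        for tok in ("Hello", ", ", "world"):
            await handler.on_llm_new_token(tok)
        await handler.on_llm_end(LLMResult(generations=[]))

    producer = asyncio.create_task(produce())
    async for token in handler.aiter():
        print(token, end="")
    await producer

# asyncio.run(_demo_stream())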
|
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator, Dict, List, Literal, Union, cast
from langchain_core.callbacks import AsyncCallbackHandler
from langchain_core.outputs import LLMResult
# TODO If used by two LLM runs in parallel this won't work as expected
class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
"""Callback handler that returns an async iterator."""
queue: asyncio.Queue[str]
done: asyncio.Event
@property
def always_verbose(self) -> bool:
return True
def __init__(self) -> None:
self.queue = asyncio.Queue()
self.done = asyncio.Event()
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if token is not None and token != "":
self.queue.put_nowait(token)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self.done.set()
async def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
self.done.set()
# TODO implement the other methods
async def aiter(self) -> AsyncIterator[str]:
while not self.queue.empty() or not self.done.is_set():
# Wait for the next token in the queue,
# but stop waiting if the done event is set
done, other = await asyncio.wait(
[
# NOTE: If you add other tasks here, update the code below,
# which assumes each set has exactly one task each
asyncio.ensure_future(self.queue.get()),
asyncio.ensure_future(self.done.wait()),
],
return_when=asyncio.FIRST_COMPLETED,
)
# Cancel the other task
if other:
other.pop().cancel()
# Extract the value of the first completed task
token_or_done = cast(Union[str, Literal[True]], done.pop().result())
# If the extracted value is the boolean True, the done event was set
if token_or_done is True:
break
# Otherwise, the extracted value is a token, which we yield
yield token_or_done
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmengine.utils import ManagerMeta, ManagerMixin
class SubClassA(ManagerMixin):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class SubClassB(ManagerMixin):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class TestGlobalMeta:
def test_init(self):
        # A subclass whose constructor does not accept a `name` argument will
        # raise an error.
with pytest.raises(AssertionError):
class SubClassNoName1(metaclass=ManagerMeta):
def __init__(self, a, *args, **kwargs):
pass
# Valid subclass.
class GlobalAccessible1(metaclass=ManagerMeta):
def __init__(self, name):
self.name = name
class TestManagerMixin:
def test_init(self):
# test create instance by name.
base_cls = ManagerMixin('name')
assert base_cls.instance_name == 'name'
def test_get_instance(self):
# SubClass should manage their own `_instance_dict`.
with pytest.raises(RuntimeError):
SubClassA.get_current_instance()
SubClassA.get_instance('instance_a')
SubClassB.get_instance('instance_b')
assert SubClassB._instance_dict != SubClassA._instance_dict
        # Test that `message_hub` can be created by name.
message_hub = SubClassA.get_instance('name1')
assert message_hub.instance_name == 'name1'
# No arguments will raise an assertion error.
SubClassA.get_instance('name2')
message_hub = SubClassA.get_current_instance()
message_hub.mark = -1
assert message_hub.instance_name == 'name2'
# Test get latest `message_hub` repeatedly.
message_hub = SubClassA.get_instance('name3')
assert message_hub.instance_name == 'name3'
message_hub = SubClassA.get_current_instance()
assert message_hub.instance_name == 'name3'
# Test get name2 repeatedly.
message_hub = SubClassA.get_instance('name2')
assert message_hub.mark == -1
# Non-string instance name will raise `AssertionError`.
with pytest.raises(AssertionError):
SubClassA.get_instance(name=1)
# `get_instance` should not accept other arguments if corresponding
# instance has been created.
with pytest.warns(UserWarning):
SubClassA.get_instance('name2', a=1, b=2)
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmengine.utils import ManagerMeta, ManagerMixin
class SubClassA(ManagerMixin):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class SubClassB(ManagerMixin):
def __init__(self, name='', *args, **kwargs):
super().__init__(name, *args, **kwargs)
class TestGlobalMeta:
def test_init(self):
        # A subclass whose constructor does not accept a `name` argument will
        # raise an error.
with pytest.raises(AssertionError):
class SubClassNoName1(metaclass=ManagerMeta):
def __init__(self, a, *args, **kwargs):
pass
# Valid subclass.
class GlobalAccessible1(metaclass=ManagerMeta):
def __init__(self, name):
self.name = name
class TestManagerMixin:
def test_init(self):
# test create instance by name.
base_cls = ManagerMixin('name')
assert base_cls.instance_name == 'name'
def test_get_instance(self):
# SubClass should manage their own `_instance_dict`.
with pytest.raises(RuntimeError):
SubClassA.get_current_instance()
SubClassA.get_instance('instance_a')
SubClassB.get_instance('instance_b')
assert SubClassB._instance_dict != SubClassA._instance_dict
        # Test that `message_hub` can be created by name.
message_hub = SubClassA.get_instance('name1')
assert message_hub.instance_name == 'name1'
# No arguments will raise an assertion error.
SubClassA.get_instance('name2')
message_hub = SubClassA.get_current_instance()
message_hub.mark = -1
assert message_hub.instance_name == 'name2'
# Test get latest `message_hub` repeatedly.
message_hub = SubClassA.get_instance('name3')
assert message_hub.instance_name == 'name3'
message_hub = SubClassA.get_current_instance()
assert message_hub.instance_name == 'name3'
# Test get name2 repeatedly.
message_hub = SubClassA.get_instance('name2')
assert message_hub.mark == -1
# Non-string instance name will raise `AssertionError`.
with pytest.raises(AssertionError):
SubClassA.get_instance(name=1)
# `get_instance` should not accept other arguments if corresponding
# instance has been created.
with pytest.raises(AssertionError):
SubClassA.get_instance('name2', a=1, b=2)
|
from __future__ import annotations
import sys
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .Router import Asym, Router
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
sys.modules["sentence_transformers.models.Asym"] = sys.modules["sentence_transformers.models.Router"]
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
"Router",
]
|
from __future__ import annotations
from .Asym import Asym, Router
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
"Router",
]
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[Union[str, bool]] = "warn",
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
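# A minimal sketch (not part of the class above) of constructing a Mask and
# querying its spatial size. Geometric ops such as `horizontal_flip` rely on the
# prototype functional namespace being importable, which may vary across
# torchvision versions, so the calls are left commented out.
# mask = Mask(torch.zeros(1, 16, 16, dtype=torch.uint8))
# print(mask.spatial_size)  # (16, 16)
# flipped = mask.horizontal_flip()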
|
from __future__ import annotations
from typing import Any, List, Optional, Tuple, Union
import PIL.Image
import torch
from torchvision.transforms import InterpolationMode
from ._datapoint import Datapoint, FillTypeJIT
class Mask(Datapoint):
@classmethod
def _wrap(cls, tensor: torch.Tensor) -> Mask:
return tensor.as_subclass(cls)
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Mask:
if isinstance(data, PIL.Image.Image):
from torchvision.prototype.transforms import functional as F
data = F.pil_to_tensor(data)
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
return cls._wrap(tensor)
@classmethod
def wrap_like(
cls,
other: Mask,
tensor: torch.Tensor,
) -> Mask:
return cls._wrap(tensor)
@property
def spatial_size(self) -> Tuple[int, int]:
return tuple(self.shape[-2:]) # type: ignore[return-value]
def horizontal_flip(self) -> Mask:
output = self._F.horizontal_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def vertical_flip(self) -> Mask:
output = self._F.vertical_flip_mask(self.as_subclass(torch.Tensor))
return Mask.wrap_like(self, output)
def resize( # type: ignore[override]
self,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
max_size: Optional[int] = None,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resize_mask(self.as_subclass(torch.Tensor), size, max_size=max_size)
return Mask.wrap_like(self, output)
def crop(self, top: int, left: int, height: int, width: int) -> Mask:
output = self._F.crop_mask(self.as_subclass(torch.Tensor), top, left, height, width)
return Mask.wrap_like(self, output)
def center_crop(self, output_size: List[int]) -> Mask:
output = self._F.center_crop_mask(self.as_subclass(torch.Tensor), output_size=output_size)
return Mask.wrap_like(self, output)
def resized_crop(
self,
top: int,
left: int,
height: int,
width: int,
size: List[int],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
antialias: Optional[bool] = None,
) -> Mask:
output = self._F.resized_crop_mask(self.as_subclass(torch.Tensor), top, left, height, width, size=size)
return Mask.wrap_like(self, output)
def pad(
self,
padding: Union[int, List[int]],
fill: FillTypeJIT = None,
padding_mode: str = "constant",
) -> Mask:
output = self._F.pad_mask(self.as_subclass(torch.Tensor), padding, padding_mode=padding_mode, fill=fill)
return Mask.wrap_like(self, output)
def rotate(
self,
angle: float,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
expand: bool = False,
center: Optional[List[float]] = None,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.rotate_mask(self.as_subclass(torch.Tensor), angle, expand=expand, center=center, fill=fill)
return Mask.wrap_like(self, output)
def affine(
self,
angle: Union[int, float],
translate: List[float],
scale: float,
shear: List[float],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
center: Optional[List[float]] = None,
) -> Mask:
output = self._F.affine_mask(
self.as_subclass(torch.Tensor),
angle,
translate=translate,
scale=scale,
shear=shear,
fill=fill,
center=center,
)
return Mask.wrap_like(self, output)
def perspective(
self,
startpoints: Optional[List[List[int]]],
endpoints: Optional[List[List[int]]],
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
coefficients: Optional[List[float]] = None,
) -> Mask:
output = self._F.perspective_mask(
self.as_subclass(torch.Tensor), startpoints, endpoints, fill=fill, coefficients=coefficients
)
return Mask.wrap_like(self, output)
def elastic(
self,
displacement: torch.Tensor,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: FillTypeJIT = None,
) -> Mask:
output = self._F.elastic_mask(self.as_subclass(torch.Tensor), displacement, fill=fill)
return Mask.wrap_like(self, output)
|
from typing import Any, Optional
def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"# {header}\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
# nl var to enable the usage of this symbol inside f-string expressions
nl = "\n"
markdown_parts.append(f"{indent}> {data.replace(nl, nl + indent + '> ')}\n")
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
|
from typing import Any, Optional
def json_to_markdown(data: Any, level: int = 0, header: Optional[str] = None) -> str:
"""
Recursively converts a Python object (from JSON) into a Markdown string.
Args:
data: The Python object to convert.
level: The current nesting level (used for indentation and heading levels).
header: Section header.
Returns:
A string containing the Markdown representation of the data.
"""
markdown_parts = []
indent = " " * level
if isinstance(data, dict):
for key, value in data.items():
heading_level = min(level + 1, 6)
markdown_parts.append(f"{indent}{'#' * heading_level} {key}\n")
markdown_parts.append(json_to_markdown(value, level + 1))
markdown_parts.append("\n")
elif isinstance(data, list):
if not data:
markdown_parts.append(f"{indent}- *Empty List*\n")
else:
if header:
markdown_parts.append(f"# {header}\n")
for index, item in enumerate(data):
if isinstance(item, (dict, list)):
markdown_parts.append(f"{indent}- Item {index + 1}:\n")
markdown_parts.append(json_to_markdown(item, level + 1))
else:
markdown_parts.append(f"{indent}- {item!s}\n")
elif isinstance(data, str):
if "\n" in data:
# nl var to enable the usage of this symbol inside f-string expressions
nl = "\n"
markdown_parts.append(
f"{indent}> {data.replace(nl, nl + indent + '> ')}\n"
)
else:
markdown_parts.append(f"{indent}{data}\n")
elif isinstance(data, (int, float, bool)) or data is None:
markdown_parts.append(f"{indent}{data!s}\n")
else:
markdown_parts.append(f"{indent}{data!s}\n")
return "".join(markdown_parts).rstrip("\n") + "\n"
|
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_reformer import ReformerTokenizer
else:
ReformerTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = ReformerTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
**kwargs,
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
__all__ = ["ReformerTokenizerFast"]
|
# coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization class for model Reformer."""
import os
from shutil import copyfile
from typing import Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_reformer import ReformerTokenizer
else:
ReformerTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
class ReformerTokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" Reformer tokenizer (backed by HuggingFace's *tokenizers* library). Based on
[Unigram](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models).
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
[SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
additional_special_tokens (`List[str]`, *optional*):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = ReformerTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
additional_special_tokens=[],
**kwargs,
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
@property
def can_save_slow_tokenizer(self) -> bool:
return os.path.isfile(self.vocab_file) if self.vocab_file else False
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer."
)
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
__all__ = ["ReformerTokenizerFast"]
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Need to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
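# A tiny numeric sketch (not part of the class above) of the CoSENT objective
# described in the docstring, with invented scores and labels: for every pair of
# pairs where the gold similarity of pair i exceeds that of pair k, the term
# exp(scale * (s(k) - s(i))) penalises predictions that rank them the other way.
if __name__ == "__main__":
    import torch

    scores = torch.tensor([0.9, 0.2, 0.6])   # predicted similarities per pair
    labels = torch.tensor([1.0, 0.3, 0.8])   # gold similarities per pair
    scale = 20.0
    diffs = scores[None, :] - scores[:, None]   # diffs[i, k] = s(k) - s(i)
    mask = labels[:, None] > labels[None, :]    # keep (i, k) with label_i > label_k
    loss = torch.logsumexp(torch.cat([torch.zeros(1), scale * diffs[mask]]), dim=0)
    print(loss)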
|
from __future__ import annotations
from collections.abc import Iterable
from torch import Tensor
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Needs to be used in SpladeLoss or CSRLoss as a loss function.
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SpladeLoss(model=model, loss=losses.SparseCoSENTLoss(model), lambda_corpus=5e-5, all_docs=True)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
model.similarity_fn_name = "cosine"
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
raise AttributeError("SparseCoSENTLoss should not be used alone. Use it with SpladeLoss or CSRLoss.")
|
_base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
semantic_head=dict(
type='PanopticFPNHead',
num_classes=54,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
test_cfg=dict(
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
custom_hooks = []
|
_base_ = [
'../_base_/datasets/coco_panoptic.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='PanopticFPN',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
semantic_head=dict(
type='PanopticFPNHead',
num_classes=54,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
conv_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1, loss_weight=0.5)),
panoptic_fusion_head=dict(
type='HeuristicFusionHead',
num_things_classes=80,
num_stuff_classes=53),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5),
panoptic=dict(
score_thr=0.6,
max_per_img=100,
mask_thr_binary=0.5,
mask_overlap=0.5,
nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
stuff_area_limit=4096)))
custom_hooks = []
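# Hedged usage sketch: configs like the one above are loaded by MMDetection
# (2.x-era, mmcv<2.0) through mmcv's Config utility. The file path below is
# illustrative only, not a path shipped with this snippet.
from mmcv import Config

cfg = Config.fromfile('configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py')
print(cfg.model.type)                       # 'PanopticFPN'
print(cfg.model.semantic_head.num_classes)  # 54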
|
_base_ = ['./cascade-mask-rcnn_r50_fpn_1x_coco.py']
model = dict(
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
data_preprocessor=dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './grid-rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './grid_rcnn_r50_fpn_gn-head_2x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = 'mask-rcnn_r50_fpn_rpn-2conv_4conv1fc_syncbn-all_lsj-100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs)
train_dataloader = dict(dataset=dict(times=2))
|
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py'
# Use RepeatDataset to speed up training
# change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs)
train_dataloader = dict(dataset=dict(times=2))
|
"""ReAct output parser."""
import re
from typing import Tuple
from llama_index.core.agent.react.types import (
ActionReasoningStep,
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import BaseOutputParser
def extract_tool_use(input_text: str) -> Tuple[str, str, str]:
pattern = r"(?:\s*Thought: (.*?)|(.+))\n+Action: ([^\n\(\) ]+).*?\n+Action Input: .*?(\{.*\})"
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(f"Could not extract tool use from input text: {input_text}")
thought = (match.group(1) or match.group(2)).strip()
action = match.group(3).strip()
action_input = match.group(4).strip()
return thought, action, action_input
def action_input_parser(json_str: str) -> dict:
processed_string = re.sub(r"(?<!\w)\'|\'(?!\w)", '"', json_str)
pattern = r'"(\w+)":\s*"([^"]*)"'
matches = re.findall(pattern, processed_string)
return dict(matches)
def extract_final_response(input_text: str) -> Tuple[str, str]:
pattern = r"\s*Thought:(.*?)Answer:(.*?)(?:$)"
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(
f"Could not extract final answer from input text: {input_text}"
)
thought = match.group(1).strip()
answer = match.group(2).strip()
return thought, answer
def parse_action_reasoning_step(output: str) -> ActionReasoningStep:
"""
Parse an action reasoning step from the LLM output.
"""
# Weaker LLMs may generate ReActAgent steps whose Action Inputs are malformed JSON strings.
# `dirtyjson` is more lenient than `json` in parsing JSON strings.
import dirtyjson as json
thought, action, action_input = extract_tool_use(output)
json_str = extract_json_str(action_input)
# First we try json; if this fails we fall back to the regex-based action_input_parser
try:
action_input_dict = json.loads(json_str)
except Exception:
action_input_dict = action_input_parser(json_str)
return ActionReasoningStep(
thought=thought, action=action, action_input=action_input_dict
)
class ReActOutputParser(BaseOutputParser):
"""ReAct Output parser."""
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
"""
Parse output from ReAct agent.
We expect the output to be in one of the following formats:
1. If the agent needs to use a tool to answer the question:
```
Thought: <thought>
Action: <action>
Action Input: <action_input>
```
2. If the agent can answer the question without any tools:
```
Thought: <thought>
Answer: <answer>
```
"""
if "Thought:" not in output and "Action:" not in output:
# NOTE: handle the case where the agent directly outputs the answer
# instead of following the thought-answer format
return ResponseReasoningStep(
thought="(Implicit) I can answer without any more tools!",
response=output,
is_streaming=is_streaming,
)
# An "Action" should take priority over an "Answer"
if "Action:" in output:
return parse_action_reasoning_step(output)
if "Answer:" in output:
thought, answer = extract_final_response(output)
return ResponseReasoningStep(
thought=thought, response=answer, is_streaming=is_streaming
)
raise ValueError(f"Could not parse output: {output}")
def format(self, output: str) -> str:
"""Format a query with structured output formatting instructions."""
raise NotImplementedError
|
"""ReAct output parser."""
import re
from typing import Tuple
from llama_index.core.agent.react.types import (
ActionReasoningStep,
BaseReasoningStep,
ResponseReasoningStep,
)
from llama_index.core.output_parsers.utils import extract_json_str
from llama_index.core.types import BaseOutputParser
def extract_tool_use(input_text: str) -> Tuple[str, str, str]:
pattern = (
r"\s*Thought: (.*?)\n+Action: ([^\n\(\) ]+).*?\n+Action Input: .*?(\{.*\})"
)
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(f"Could not extract tool use from input text: {input_text}")
thought = match.group(1).strip()
action = match.group(2).strip()
action_input = match.group(3).strip()
return thought, action, action_input
def action_input_parser(json_str: str) -> dict:
processed_string = re.sub(r"(?<!\w)\'|\'(?!\w)", '"', json_str)
pattern = r'"(\w+)":\s*"([^"]*)"'
matches = re.findall(pattern, processed_string)
return dict(matches)
def extract_final_response(input_text: str) -> Tuple[str, str]:
pattern = r"\s*Thought:(.*?)Answer:(.*?)(?:$)"
match = re.search(pattern, input_text, re.DOTALL)
if not match:
raise ValueError(
f"Could not extract final answer from input text: {input_text}"
)
thought = match.group(1).strip()
answer = match.group(2).strip()
return thought, answer
def parse_action_reasoning_step(output: str) -> ActionReasoningStep:
"""
Parse an action reasoning step from the LLM output.
"""
# Weaker LLMs may generate ReActAgent steps whose Action Inputs are malformed JSON strings.
# `dirtyjson` is more lenient than `json` in parsing JSON strings.
import dirtyjson as json
thought, action, action_input = extract_tool_use(output)
json_str = extract_json_str(action_input)
# First we try json; if this fails we fall back to the regex-based action_input_parser
try:
action_input_dict = json.loads(json_str)
except Exception:
action_input_dict = action_input_parser(json_str)
return ActionReasoningStep(
thought=thought, action=action, action_input=action_input_dict
)
class ReActOutputParser(BaseOutputParser):
"""ReAct Output parser."""
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
"""
Parse output from ReAct agent.
We expect the output to be in one of the following formats:
1. If the agent needs to use a tool to answer the question:
```
Thought: <thought>
Action: <action>
Action Input: <action_input>
```
2. If the agent can answer the question without any tools:
```
Thought: <thought>
Answer: <answer>
```
"""
if "Thought:" not in output:
# NOTE: handle the case where the agent directly outputs the answer
# instead of following the thought-answer format
return ResponseReasoningStep(
thought="(Implicit) I can answer without any more tools!",
response=output,
is_streaming=is_streaming,
)
# An "Action" should take priority over an "Answer"
if "Action:" in output:
return parse_action_reasoning_step(output)
if "Answer:" in output:
thought, answer = extract_final_response(output)
return ResponseReasoningStep(
thought=thought, response=answer, is_streaming=is_streaming
)
raise ValueError(f"Could not parse output: {output}")
def format(self, output: str) -> str:
"""Format a query with structured output formatting instructions."""
raise NotImplementedError
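# Self-contained sketch of the extraction performed by extract_tool_use() above,
# applying the same regular expression to a made-up ReAct completion.
import re

sample = (
    "Thought: I need to look up the weather.\n"
    "Action: weather_tool\n"
    'Action Input: {"city": "Berlin"}'
)
pattern = r"\s*Thought: (.*?)\n+Action: ([^\n\(\) ]+).*?\n+Action Input: .*?(\{.*\})"
match = re.search(pattern, sample, re.DOTALL)
assert match is not None
thought, action, action_input = (g.strip() for g in match.groups())
print(thought)       # I need to look up the weather.
print(action)        # weather_tool
print(action_input)  # {"city": "Berlin"}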
|
# Copyright (c) OpenMMLab. All rights reserved.
from .local_visualizer import DetLocalVisualizer
from .palette import get_palette, palette_val
__all__ = ['palette_val', 'get_palette', 'DetLocalVisualizer']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .image import (color_val_matplotlib, imshow_det_bboxes,
imshow_gt_det_bboxes)
from .palette import get_palette, palette_val
__all__ = [
'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib',
'palette_val', 'get_palette'
]
|
from typing import List, Optional, TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import TEXT_EXTRA_EXTENSIONS, TEXT_MIMETYPE
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return TEXT_MIMETYPE
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return TEXT_EXTRA_EXTENSIONS
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
---
```python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
)
remote_txt = doc.remote_url.load()
```
---
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
from typing import Optional, TypeVar
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
T = TypeVar('T', bound='TextUrl')
@_register_proto(proto_type_name='text_url')
class TextUrl(AnyUrl):
"""
URL to a text file.
Can be remote (web) URL, or a local file path.
"""
def load(self, charset: str = 'utf-8', timeout: Optional[float] = None) -> str:
"""
Load the text file into a string.
---
```python
from docarray import BaseDoc
from docarray.typing import TextUrl
class MyDoc(BaseDoc):
remote_url: TextUrl
doc = MyDoc(
remote_url='https://de.wikipedia.org/wiki/Brixen',
)
remote_txt = doc.remote_url.load()
```
---
:param timeout: timeout (sec) for urlopen network request.
Only relevant if URL is not local
:param charset: decoding charset; may be any character set registered with IANA
:return: the text file content
"""
_bytes = self.load_bytes(timeout=timeout)
return _bytes.decode(charset)
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from jina import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Module for Jina Requests."""
from typing import (
Iterator,
Union,
Tuple,
AsyncIterable,
Iterable,
Optional,
Dict,
TYPE_CHECKING,
)
from jina.clients.request.helper import _new_data_request_from_batch, _new_data_request
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from jina import Document
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: 'GeneratorSourceType',
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
_kwargs = dict(extra_kwargs=kwargs)
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
_kwargs=kwargs,
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as grpc channel wont handle Python exception
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='The UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--optout-telemetry',
action='store_true',
default=False,
help='If set, disables telemetry during the Flow/Pod/Runtime start.',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='The UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
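# Minimal usage sketch (the argument values are illustrative): attach the mixin
# defined above to a fresh argparse parser and parse a couple of flags.
demo_parser = argparse.ArgumentParser(description='demo')
mixin_base_ppr_parser(demo_parser)
demo_args = demo_parser.parse_args(['--name', 'my-deployment', '--polling', 'ALL'])
print(demo_args.name, demo_args.polling)  # my-deployment ALL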
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.api import wrappers
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import _tf_keras
from keras.api import activations
from keras.api import applications
from keras.api import backend
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import layers
from keras.api import legacy
from keras.api import losses
from keras.api import metrics
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import preprocessing
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import saving
from keras.api import tree
from keras.api import utils
from keras.api import visualization
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g" 3. copy all lines EXCEPT the first (which is the grep command in the
# last line)
__jina_env__ = (
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DEPLOYMENT_NAME',
'JINA_DISABLE_UVLOOP',
'JINA_EARLY_STOP',
'JINA_FULL_CLI',
'JINA_GATEWAY_IMAGE',
'JINA_GRPC_RECV_BYTES',
'JINA_GRPC_SEND_BYTES',
'JINA_HUB_NO_IMAGE_REBUILD',
'JINA_LOG_CONFIG',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_MP_START_METHOD',
'JINA_OPTOUT_TELEMETRY',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_LOCKS_ROOT',
'JINA_K8S_ACCESS_MODES',
'JINA_K8S_STORAGE_CLASS_NAME',
'JINA_K8S_STORAGE_CAPACITY',
'JINA_STREAMER_ARGS',
)
__default_host__ = _os.environ.get(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_gateway__ = 'BaseGateway'
__default_http_gateway__ = 'HTTPGateway'
__default_composite_gateway__ = 'CompositeGateway'
__default_websocket_gateway__ = 'WebSocketGateway'
__default_grpc_gateway__ = 'GRPCGateway'
__default_endpoint__ = '/default'
__dynamic_base_gateway_hubble__ = 'jinaai+docker://jina-ai/JinaGateway:latest'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
__cache_path__ = f'{_os.path.expanduser("~")}/.cache/{__package__}'
if not _Path(__cache_path__).exists():
_Path(__cache_path__).mkdir(parents=True, exist_ok=True)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__default_endpoint__',
'__default_executor__',
'__unset_msg__',
'__windows__',
]
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend(_names_with_underscore)
RAFT_TO_EXECUTOR_PORT = 100
|
import os as _os
import sys as _sys
from pathlib import Path as _Path
import datetime as _datetime
__windows__ = _sys.platform == 'win32'
__uptime__ = _datetime.datetime.now().isoformat()
# update on MacOS 1. clean this tuple, 2. grep -rohEI --exclude-dir=jina/hub --exclude-dir=tests --include \*.py
# "\'JINA_.*?\'" jina | sort -u | sed "s/$/,/g" 3. copy all lines EXCEPT the first (which is the grep command in the
# last line)
__jina_env__ = (
'JINA_DEFAULT_HOST',
'JINA_DEFAULT_TIMEOUT_CTRL',
'JINA_DEPLOYMENT_NAME',
'JINA_DISABLE_UVLOOP',
'JINA_EARLY_STOP',
'JINA_FULL_CLI',
'JINA_GATEWAY_IMAGE',
'JINA_GRPC_RECV_BYTES',
'JINA_GRPC_SEND_BYTES',
'JINA_HUB_NO_IMAGE_REBUILD',
'JINA_LOG_CONFIG',
'JINA_LOG_LEVEL',
'JINA_LOG_NO_COLOR',
'JINA_MP_START_METHOD',
'JINA_OPTOUT_TELEMETRY',
'JINA_RANDOM_PORT_MAX',
'JINA_RANDOM_PORT_MIN',
'JINA_LOCKS_ROOT',
'JINA_K8S_ACCESS_MODES',
'JINA_K8S_STORAGE_CLASS_NAME',
'JINA_K8S_STORAGE_CAPACITY',
'JINA_STREAMER_ARGS',
)
__default_host__ = _os.environ.get(
'JINA_DEFAULT_HOST', '127.0.0.1' if __windows__ else '0.0.0.0'
)
__docker_host__ = 'host.docker.internal'
__default_executor__ = 'BaseExecutor'
__default_gateway__ = 'BaseGateway'
__default_http_gateway__ = 'HTTPGateway'
__default_composite_gateway__ = 'CompositeGateway'
__default_websocket_gateway__ = 'WebSocketGateway'
__default_grpc_gateway__ = 'GRPCGateway'
__default_endpoint__ = '/default'
__ready_msg__ = 'ready and listening'
__stop_msg__ = 'terminated'
__unset_msg__ = '(unset)'
__args_executor_func__ = {
'docs',
'parameters',
'docs_matrix',
}
__args_executor_init__ = {'metas', 'requests', 'runtime_args'}
__resources_path__ = _os.path.join(
_os.path.dirname(_sys.modules['jina'].__file__), 'resources'
)
__cache_path__ = f'{_os.path.expanduser("~")}/.cache/{__package__}'
if not _Path(__cache_path__).exists():
_Path(__cache_path__).mkdir(parents=True, exist_ok=True)
_names_with_underscore = [
'__version__',
'__proto_version__',
'__default_host__',
'__ready_msg__',
'__stop_msg__',
'__jina_env__',
'__uptime__',
'__default_endpoint__',
'__default_executor__',
'__unset_msg__',
'__windows__',
]
__all__ = [_s for _s in dir() if not _s.startswith('_')]
__all__.extend(_names_with_underscore)
RAFT_TO_EXECUTOR_PORT = 100
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '2.0.0rc0'
mmcv_maximum_version = '2.0.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, short_version
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.6.0'
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_version >= digit_version(mmcv_minimum_version)
and mmcv_version <= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
__all__ = ['__version__', 'short_version']
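# Quick illustration of how digit_version() above orders release candidates
# below the corresponding release: a segment like '0rc0' expands to [-1, 0],
# which sorts before the final release's patch number.
print(digit_version('1.3.17'))    # [1, 3, 17]
print(digit_version('2.0.0rc0'))  # [2, 0, -1, 0]
print(digit_version('2.0.0rc0') < digit_version('2.0.0'))  # True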
|
import time
import http.client
import json
from typing import List, Optional, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
class GalaxiaClient:
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int,
wait_time: int,
):
self.api_url = api_url
self.api_key = api_key
self.knowledge_base_id = knowledge_base_id
self.n_retries = n_retries
self.wait_time = wait_time
self.headers = {"X-Api-Key": api_key, "Content-Type": "application/json"}
def initialize(
self,
conn: http.client.HTTPSConnection,
question: str,
) -> dict:
payload_0 = '{\n "algorithmVersion":"%s",\n' % self.knowledge_base_id
payload_1 = ' "text":"%s" \n}' % question.replace('"', '\\"')
payload = payload_0 + payload_1
conn.request("POST", "/analyze/initialize", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def check_status(
self,
conn: http.client.HTTPSConnection,
init_res: dict,
) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/status", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_result(self, conn: http.client.HTTPSConnection, init_res: dict) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/result", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def retrieve(
self,
query: str,
) -> Union[dict, None]:
conn = http.client.HTTPSConnection(self.api_url)
flag_init = False
for i in range(self.n_retries):
init_res = self.initialize(conn, query)
if "operationId" in init_res:
flag_init = True
break
time.sleep(self.wait_time * i)
if not flag_init:
# failed to init
return None
flag_proc = False
for i in range(1, self.n_retries + 1):
time.sleep(self.wait_time * i)
status = self.check_status(conn, init_res)
if status["status"] == "processed":
flag_proc = True
break
if flag_proc:
res = self.get_result(conn, init_res)
return res["result"]["resultItems"]
else:
# failed to process
return None
class GalaxiaRetriever(BaseRetriever):
"""Galaxia knowledge retriever.
Before using the API, create your knowledge base here:
beta.cloud.smabbler.com/
Learn more here:
https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag
Args:
api_url : url of galaxia API, e.g. "https://beta.api.smabbler.com"
api_key : API key
knowledge_base_id : ID of the knowledge base (galaxia model)
Example:
.. code-block:: python
from llama_index.retrievers.galaxia import GalaxiaRetriever
from llama_index.core.schema import QueryBundle
retriever = GalaxiaRetriever(
api_url="beta.api.smabbler.com",
api_key="<key>",
knowledge_base_id="<knowledge_base_id>",
)
result = retriever._retrieve(QueryBundle(
"<test question>"
))
print(result)
"""
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int = 20,
wait_time: int = 2,
callback_manager: Optional[CallbackManager] = None,
):
self._client = GalaxiaClient(
api_url, api_key, knowledge_base_id, n_retries, wait_time
)
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(query)
if response is None:
return []
node_with_score = []
for res in response:
node_with_score.append(
NodeWithScore(
node=TextNode(
text=res["category"],
metadata={
"model": res["model"],
"file": res["group"],
},
),
score=res["rank"],
)
)
return node_with_score
|
import time
import http.client
import json
from typing import List, Optional, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.schema import NodeWithScore, QueryBundle, TextNode
class GalaxiaClient:
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int,
wait_time: int,
):
self.api_url = api_url
self.api_key = api_key
self.knowledge_base_id = knowledge_base_id
self.n_retries = n_retries
self.wait_time = wait_time
self.headers = {"X-Api-Key": api_key, "Content-Type": "application/json"}
def initialize(
self,
conn: http.client.HTTPSConnection,
question: str,
) -> dict:
payload_0 = '{\n "algorithmVersion":"%s",\n' % self.knowledge_base_id
payload_1 = ' "text":"%s" \n}' % question.replace('"', '\\"')
payload = payload_0 + payload_1
conn.request("POST", "/analyze/initialize", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def check_status(
self,
conn: http.client.HTTPSConnection,
init_res: dict,
) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/status", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def get_result(self, conn: http.client.HTTPSConnection, init_res: dict) -> dict:
payload = '{\n "operationId": "%s"\n}' % init_res["operationId"]
conn.request("POST", "/analyze/result", payload, self.headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))
def retrieve(
self,
query: str,
) -> Union[dict, None]:
conn = http.client.HTTPSConnection(self.api_url)
flag_init = False
for i in range(self.n_retries):
init_res = self.initialize(conn, query)
if "operationId" in init_res:
flag_init = True
break
time.sleep(self.wait_time * i)
if not flag_init:
# failed to init
return None
flag_proc = False
for i in range(1, self.n_retries + 1):
time.sleep(self.wait_time * i)
status = self.check_status(conn, init_res)
if status["status"] == "processed":
flag_proc = True
break
if flag_proc:
res = self.get_result(conn, init_res)
return res["result"]["resultItems"]
else:
# failed to process
return None
class GalaxiaRetriever(BaseRetriever):
"""Galaxia knowledge retriever.
Before using the API, create your knowledge base here:
beta.cloud.smabbler.com/
Learn more here:
https://smabbler.gitbook.io/smabbler/api-rag/smabblers-api-rag
Args:
api_url : url of galaxia API, e.g. "https://beta.api.smabbler.com"
api_key : API key
knowledge_base_id : ID of the knowledge base (galaxia model)
Example:
.. code-block:: python
from llama_index.retrievers.galaxia import GalaxiaRetriever
from llama_index.core.schema import QueryBundle
retriever = GalaxiaRetriever(
api_url="https://beta.api.smabbler.com",
api_key="<key>",
knowledge_base_id="<knowledge_base_id>",
)
result = retriever._retrieve(QueryBundle(
"<test question>"
))
print(result)
"""
def __init__(
self,
api_url: str,
api_key: str,
knowledge_base_id: str,
n_retries: int = 20,
wait_time: int = 2,
callback_manager: Optional[CallbackManager] = None,
):
self._client = GalaxiaClient(
api_url, api_key, knowledge_base_id, n_retries, wait_time
)
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
query = query_bundle.query_str
response = self._client.retrieve(query)
if response is None:
return []
node_with_score = []
for res in response:
node_with_score.append(
NodeWithScore(
node=TextNode(
text=res["category"],
metadata={
"model": res["model"],
"file": res["group"],
},
),
score=res["rank"],
)
)
return node_with_score
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
# Work around to export mask-rcnn to onnx
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i *= mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t *= mask_exp
roi_feats += roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
|
import torch
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
# Work around to export mask-rcnn to onnx
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
# TODO: remove this when parrots supports
if torch.__version__ == 'parrots':
roi_feats.requires_grad = True
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i *= mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t *= mask_exp
roi_feats += roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats += sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
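# Small self-contained check of the level-mapping rule documented in
# map_roi_levels(): with finest_scale=56, sqrt(RoI area) around 112 maps to
# level 1, around 224 to level 2, and anything much larger saturates at level 3.
import math

finest_scale = 56
for roi_scale in (50.0, 112.0, 224.0, 500.0):
    lvl = math.floor(math.log2(roi_scale / finest_scale + 1e-6))
    lvl = min(max(lvl, 0), 3)  # clamp to [0, num_levels - 1] for 4 FPN levels
    print(roi_scale, '->', lvl)  # -> 0, 1, 2, 3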
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina._docarray import Document
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from jina._docarray import Document
from jina._docarray.document import DocumentSourceType
from jina._docarray.document.mixins.content import DocumentContentType
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: Optional['GeneratorSourceType'] = None,
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained document, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set to text, blob and buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable) or isinstance(data, Document):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
"""Module for Jina Requests."""
from typing import (
TYPE_CHECKING,
AsyncIterable,
Dict,
Iterable,
Iterator,
Optional,
Tuple,
Union,
)
from jina.clients.request.helper import _new_data_request, _new_data_request_from_batch
from jina.enums import DataInputType
from jina.helper import batch_iterator
from jina.logging.predefined import default_logger
if TYPE_CHECKING: # pragma: no cover
from docarray.document import DocumentSourceType
from docarray.document.mixins.content import DocumentContentType
from docarray import Document
from jina.types.request import Request
SingletonDataType = Union[
DocumentContentType,
DocumentSourceType,
Document,
Tuple[DocumentContentType, DocumentContentType],
Tuple[DocumentSourceType, DocumentSourceType],
]
GeneratorSourceType = Union[
Document, Iterable[SingletonDataType], AsyncIterable[SingletonDataType]
]
def request_generator(
exec_endpoint: str,
data: Optional['GeneratorSourceType'] = None,
request_size: int = 0,
data_type: DataInputType = DataInputType.AUTO,
target_executor: Optional[str] = None,
parameters: Optional[Dict] = None,
**kwargs, # do not remove this, add on purpose to suppress unknown kwargs
) -> Iterator['Request']:
"""Generate a request iterator.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param data: data to send, a list of dict/string/bytes that can be converted into a list of `Document` objects
:param request_size: the number of the `Documents` in each request
:param data_type: if ``data`` is an iterator over self-contained documents, i.e. :class:`DocumentSourceType`;
or an iterator over possible Document content (set as text, blob or buffer).
:param parameters: a dictionary of parameters to be sent to the executor
:param target_executor: a regex string. Only matching Executors will process the request.
:param kwargs: additional arguments
:yield: request
"""
try:
if data is None:
# this allows empty inputs, i.e. a data request with only parameters
yield _new_data_request(
endpoint=exec_endpoint, target=target_executor, parameters=parameters
)
else:
if not isinstance(data, Iterable):
data = [data]
for batch in batch_iterator(data, request_size):
yield _new_data_request_from_batch(
batch=batch,
data_type=data_type,
endpoint=exec_endpoint,
target=target_executor,
parameters=parameters,
)
except Exception as ex:
# must be handled here, as the grpc channel won't handle Python exceptions
default_logger.critical(f'inputs is not valid! {ex!r}', exc_info=True)
raise
|
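# --- Illustrative usage sketch (not part of the source rows above) ---
# A minimal call to `request_generator` as defined above, assuming the module
# is importable as `jina.clients.request` and that `Document` accepts a text
# argument; the '/index' endpoint name is an example only.
from jina import Document
from jina.clients.request import request_generator

docs = [Document(text=f'doc {i}') for i in range(10)]
requests = list(request_generator(exec_endpoint='/index', data=docs, request_size=4))
print(len(requests))  # expected: 3 requests, batched as 4 + 4 + 2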
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
from __future__ import annotations
import re
import pytest
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.util import is_datasets_available
from tests.utils import is_ci
if not is_datasets_available():
pytest.skip(
reason="Datasets are not installed. Please install `datasets` with `pip install datasets`",
allow_module_level=True,
)
if is_ci():
pytest.skip(
reason="Skip test in CI to try and avoid 429 Client Error",
allow_module_level=True,
)
def test_nanobeir_evaluator(stsb_bert_tiny_model_reused: SentenceTransformer):
"""Tests that the NanoBERTEvaluator can be loaded and produces expected metrics"""
datasets = ["QuoraRetrieval", "MSMARCO"]
query_prompts = {
"QuoraRetrieval": "Instruct: Given a question, retrieve questions that are semantically equivalent to the given question\\nQuery: ",
"MSMARCO": "Instruct: Given a web search query, retrieve relevant passages that answer the query\\nQuery: ",
}
model = stsb_bert_tiny_model_reused
evaluator = NanoBEIREvaluator(
dataset_names=datasets,
query_prompts=query_prompts,
)
results = evaluator(model)
assert len(results) > 0
assert all(isinstance(results[metric], float) for metric in results)
def test_nanobeir_evaluator_with_invalid_dataset():
"""Test that NanoBEIREvaluator raises an error for invalid dataset names."""
invalid_datasets = ["invalidDataset"]
with pytest.raises(
ValueError,
match=re.escape(
r"Dataset(s) ['invalidDataset'] not found in the NanoBEIR collection. "
r"Valid dataset names are: ['climatefever', 'dbpedia', 'fever', 'fiqa2018', 'hotpotqa', 'msmarco', 'nfcorpus', 'nq', 'quoraretrieval', 'scidocs', 'arguana', 'scifact', 'touche2020']"
),
):
NanoBEIREvaluator(dataset_names=invalid_datasets)
def test_nanobeir_evaluator_empty_inputs():
"""Test that NanoBEIREvaluator behaves correctly with empty datasets."""
with pytest.raises(ValueError, match="dataset_names cannot be empty. Use None to evaluate on all datasets."):
NanoBEIREvaluator(dataset_names=[])
|
_base_ = './rpn_r50_fpn_1x_coco.py'
# use caffe img_norm
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './rpn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_label=False),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
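# --- Illustrative sketch: loading the caffe-style RPN config above with
# mmengine's Config. The file path follows MMDetection's usual layout and the
# `_base_` file must be resolvable from the working directory; both are
# assumptions here.
from mmengine.config import Config

cfg = Config.fromfile('configs/rpn/rpn_r50-caffe_fpn_1x_coco.py')
print(cfg.model.backbone.style)                 # 'caffe'
print(cfg.model.backbone.init_cfg.checkpoint)   # 'open-mmlab://detectron2/resnet50_caffe'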
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RepPointsDetector(SingleStageDetector):
"""RepPoints: Point Set Representation for Object Detection.
This detector is the implementation of:
- RepPoints detector (https://arxiv.org/pdf/1904.11490)
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core.utils import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class RepPointsDetector(SingleStageDetector):
"""RepPoints: Point Set Representation for Object Detection.
This detector is the implementation of:
- RepPoints detector (https://arxiv.org/pdf/1904.11490)
"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayWeaviate` object"""
super().__del__()
if (
not self._persist
and len(_REGISTRY[self.__class__.__name__][self._class_name]) == 1
):
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
_REGISTRY[self.__class__.__name__][self._class_name].remove(self)
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
# two DAW are considered as the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __del__(self):
"""Delete this :class:`DocumentArrayWeaviate` object"""
super().__del__()
if (
not self._persist
and len(_REGISTRY[self.__class__.__name__][self._class_name]) == 1
):
self._client.schema.delete_class(self._class_name)
self._client.schema.delete_class(self._meta_name)
_REGISTRY[self.__class__.__name__][self._class_name].remove(self)
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def extend(self, values: Iterable['Document']) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
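# --- Illustrative usage sketch (assumes a running Weaviate instance; the
# connection config is an example only). It exercises the sequence-like
# methods defined above through DocumentArray's weaviate storage backend:
# extend, __len__ and __contains__.
from docarray import Document, DocumentArray

da = DocumentArray(storage='weaviate', config={'host': 'localhost', 'port': 8080})
d = Document(text='hello')
da.extend([d, Document(text='world')])
print(len(da))     # 2, via the aggregate meta count
print(d.id in da)  # True, via data_object.exists
print(d in da)     # True, the Document branch of __contains__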
from typing import Any, Dict, Tuple, Union
import numpy as np
import PIL.Image
import torch
from torchvision.io.video import read_video
from torchvision.prototype import features
from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer
from torchvision.transforms import functional as _F
@torch.jit.unused
def decode_image_with_pil(encoded_image: torch.Tensor) -> features.Image:
image = torch.as_tensor(np.array(PIL.Image.open(ReadOnlyTensorBuffer(encoded_image)), copy=True))
if image.ndim == 2:
image = image.unsqueeze(2)
return features.Image(image.permute(2, 0, 1))
@torch.jit.unused
def decode_video_with_av(encoded_video: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
import unittest.mock
with unittest.mock.patch("torchvision.io.video.os.path.exists", return_value=True):
return read_video(ReadOnlyTensorBuffer(encoded_video)) # type: ignore[arg-type]
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarray):
output = torch.from_numpy(image).permute((2, 0, 1)).contiguous()
elif isinstance(image, PIL.Image.Image):
output = pil_to_tensor(image)
else: # isinstance(inpt, torch.Tensor):
output = image
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
import unittest.mock
from typing import Any, Dict, Tuple, Union
import numpy as np
import PIL.Image
import torch
from torchvision.io.video import read_video
from torchvision.prototype import features
from torchvision.prototype.utils._internal import ReadOnlyTensorBuffer
from torchvision.transforms import functional as _F
@torch.jit.unused
def decode_image_with_pil(encoded_image: torch.Tensor) -> features.Image:
image = torch.as_tensor(np.array(PIL.Image.open(ReadOnlyTensorBuffer(encoded_image)), copy=True))
if image.ndim == 2:
image = image.unsqueeze(2)
return features.Image(image.permute(2, 0, 1))
@torch.jit.unused
def decode_video_with_av(encoded_video: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
with unittest.mock.patch("torchvision.io.video.os.path.exists", return_value=True):
return read_video(ReadOnlyTensorBuffer(encoded_video)) # type: ignore[arg-type]
@torch.jit.unused
def to_image_tensor(image: Union[torch.Tensor, PIL.Image.Image, np.ndarray]) -> features.Image:
if isinstance(image, np.ndarray):
output = torch.from_numpy(image).permute((2, 0, 1)).contiguous()
elif isinstance(image, PIL.Image.Image):
output = pil_to_tensor(image)
else: # isinstance(inpt, torch.Tensor):
output = image
return features.Image(output)
to_image_pil = _F.to_pil_image
pil_to_tensor = _F.pil_to_tensor
# We changed the names to align them with the new naming scheme. Still, `to_pil_image` is
# prevalent and well understood. Thus, we just alias it without deprecating the old name.
to_pil_image = to_image_pil
|
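# --- Minimal sketch of the conversion helpers above; the import path is an
# assumption based on the prototype package layout and may differ.
import numpy as np
from torchvision.prototype.transforms.functional import to_image_pil, to_image_tensor

img_hwc = np.zeros((4, 4, 3), dtype=np.uint8)  # HWC numpy image
img = to_image_tensor(img_hwc)                 # features.Image in CHW layout
print(img.shape)                               # torch.Size([3, 4, 4])
print(type(to_image_pil(img)))                 # a PIL.Image.Image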
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
def _ignore_google_warnings():
import warnings
warnings.filterwarnings(
'ignore',
category=DeprecationWarning,
message='Deprecated call to `pkg_resources.declare_namespace(\'google\')`.',
append=True
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning, append=True)
_ignore_google_warnings()
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(f'multiprocessing start method is set to `fork`')
except Exception as e:
_warnings.warn(f'failed to set multiprocessing start_method to `fork`: {e!r}')
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.19.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
with many parallel plot generators, where the Ubuntu default of `ulimit -n 1024` or the
OS X El Capitan default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
"""
Top-level module of Jina.
The primary function of this module is to import all of the public Jina
interfaces into a single place. The interfaces themselves are located in
sub-modules, as described below.
"""
import os as _os
import platform as _platform
import signal as _signal
import sys as _sys
import warnings as _warnings
import docarray as _docarray
if _sys.version_info < (3, 7, 0):
raise OSError(f'Jina requires Python >= 3.7, but yours is {_sys.version_info}')
def _warning_on_one_line(message, category, filename, lineno, *args, **kwargs):
return '\033[1;33m%s: %s\033[0m \033[1;30m(raised from %s:%s)\033[0m\n' % (
category.__name__,
message,
filename,
lineno,
)
_warnings.formatwarning = _warning_on_one_line
_warnings.simplefilter('always', DeprecationWarning)
# fix fork error on macOS; this seems to have no effect, so the variable must be exported manually before starting Jina
_os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
# JINA_MP_START_METHOD has higher priority than os-patch
_start_method = _os.environ.get('JINA_MP_START_METHOD', None)
if _start_method and _start_method.lower() in {'fork', 'spawn', 'forkserver'}:
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method(_start_method.lower())
_warnings.warn(
f'multiprocessing start method is set to `{_start_method.lower()}`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `{_start_method.lower()}`: {e!r}'
)
elif _sys.version_info >= (3, 8, 0) and _platform.system() == 'Darwin':
# DO SOME OS-WISE PATCHES
# temporary fix for python 3.8 on macos where the default start method is set to "spawn"
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
from multiprocessing import set_start_method as _set_start_method
try:
_set_start_method('fork')
_warnings.warn(
f'multiprocessing start method is set to `fork`'
)
except Exception as e:
_warnings.warn(
f'failed to set multiprocessing start_method to `fork`: {e!r}'
)
# do not change this line manually
# this is managed by git tag and updated on every release
# NOTE: this represents the NEXT release version
__version__ = '3.19.2'
# do not change this line manually
# this is managed by proto/build-proto.sh and updated on every execution
__proto_version__ = '0.1.27'
try:
__docarray_version__ = _docarray.__version__
except AttributeError as e:
raise RuntimeError(
'`docarray` dependency is not installed correctly, please reinstall with `pip install -U --force-reinstall docarray`'
)
try:
_signal.signal(_signal.SIGINT, _signal.default_int_handler)
except Exception as exc:
_warnings.warn(f'failed to set default signal handler: {exc!r}')
def _set_nofile(nofile_atleast=4096):
"""
Set the nofile soft limit to at least 4096. This is useful when running matplotlib/seaborn
with many parallel plot generators, where the Ubuntu default of `ulimit -n 1024` or the
OS X El Capitan default of 256 is too low; the setting is temporary and expires with the Python session.
:param nofile_atleast: nofile soft limit
:return: nofile soft limit and nofile hard limit
"""
try:
import resource as res
except ImportError: # Windows
res = None
if res is None:
return (None,) * 2
soft, ohard = res.getrlimit(res.RLIMIT_NOFILE)
hard = ohard
if soft < nofile_atleast:
soft = nofile_atleast
if hard < soft:
hard = soft
try:
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except (ValueError, res.error):
try:
hard = soft
print(f'trouble with max limit, retrying with soft,hard {soft},{hard}')
res.setrlimit(res.RLIMIT_NOFILE, (soft, hard))
except Exception:
print('failed to set ulimit, giving up')
soft, hard = res.getrlimit(res.RLIMIT_NOFILE)
return soft, hard
_set_nofile()
# ONLY FIRST CLASS CITIZENS ARE ALLOWED HERE, namely Document, Executor Flow
# Document
from jina._docarray import Document, DocumentArray
# Client
from jina.clients import Client
# Deployment
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.asyncio import AsyncFlow
# Flow
from jina.orchestrate.flow.base import Flow
# Executor
from jina.serve.executors import BaseExecutor as Executor
from jina.serve.executors.decorators import dynamic_batching, monitor, requests
# Custom Gateway
from jina.serve.runtimes.gateway.gateway import Gateway
|
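# --- Hedged sketch of the JINA_MP_START_METHOD handling above ---
# The variable has to be set before `import jina` for the start-method patch
# to take effect; actual behaviour depends on platform and Python version.
import os
os.environ['JINA_MP_START_METHOD'] = 'spawn'

import multiprocessing
import jina  # the start method is selected at import time

print(multiprocessing.get_start_method())  # expected: 'spawn'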
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
import torch.nn as nn
from torch.optim import SGD
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
from mmengine.optim import OptimWrapper, OptimWrapperDict
class TestRuntimeInfoHook(TestCase):
def test_before_run(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_run')
runner = Mock()
runner.epoch = 3
runner.iter = 30
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_run(runner)
self.assertEqual(message_hub.get_info('epoch'), 3)
self.assertEqual(message_hub.get_info('iter'), 30)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
model = nn.Linear(1, 1)
optim1 = SGD(model.parameters(), lr=0.01)
optim2 = SGD(model.parameters(), lr=0.02)
optim_wrapper1 = OptimWrapper(optim1)
optim_wrapper2 = OptimWrapper(optim2)
optim_wrapper_dict = OptimWrapperDict(
key1=optim_wrapper1, key2=optim_wrapper2)
# single optimizer
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optim_wrapper = optim_wrapper1
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
with self.assertRaisesRegex(AssertionError,
'runner.optim_wrapper.get_lr()'):
runner.optim_wrapper = Mock()
runner.optim_wrapper.get_lr = Mock(return_value='error type')
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
# multiple optimizers
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
optimizer1 = Mock()
optimizer1.param_groups = [{'lr': 0.01}]
optimizer2 = Mock()
optimizer2.param_groups = [{'lr': 0.02}]
runner.message_hub = message_hub
runner.optim_wrapper = optim_wrapper_dict
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(
message_hub.get_scalar('train/key1.lr').current(), 0.01)
self.assertEqual(
message_hub.get_scalar('train/key2.lr').current(), 0.02)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner,
batch_idx=2,
data_batch=None,
outputs={'log_vars': {
'loss_cls': 1.111
}})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import Mock
from mmengine.hooks import RuntimeInfoHook
from mmengine.logging import MessageHub
class TestRuntimeInfoHook(TestCase):
def test_before_run(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_run')
runner = Mock()
runner.epoch = 3
runner.iter = 30
runner.max_epochs = 4
runner.max_iters = 40
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_run(runner)
self.assertEqual(message_hub.get_info('epoch'), 3)
self.assertEqual(message_hub.get_info('iter'), 30)
self.assertEqual(message_hub.get_info('max_epochs'), 4)
self.assertEqual(message_hub.get_info('max_iters'), 40)
def test_before_train(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train')
runner = Mock()
runner.epoch = 7
runner.iter = 71
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train(runner)
self.assertEqual(message_hub.get_info('epoch'), 7)
self.assertEqual(message_hub.get_info('iter'), 71)
def test_before_train_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_epoch')
runner = Mock()
runner.epoch = 9
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_epoch(runner)
self.assertEqual(message_hub.get_info('epoch'), 9)
def test_before_train_iter(self):
# single optimizer
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
runner.optimizer.param_groups = [{'lr': 0.01}]
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(message_hub.get_scalar('train/lr').current(), 0.01)
# multiple optimizers
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_before_train_iter')
runner = Mock()
runner.iter = 9
optimizer1 = Mock()
optimizer1.param_groups = [{'lr': 0.01}]
optimizer2 = Mock()
optimizer2.param_groups = [{'lr': 0.02}]
runner.optimizer = dict(key1=optimizer1, key2=optimizer2)
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.before_train_iter(runner, batch_idx=2, data_batch=None)
self.assertEqual(message_hub.get_info('iter'), 9)
self.assertEqual(
message_hub.get_scalar('train/key1.lr').current(), 0.01)
self.assertEqual(
message_hub.get_scalar('train/key2.lr').current(), 0.02)
def test_after_train_iter(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_train_iter')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_train_iter(
runner,
batch_idx=2,
data_batch=None,
outputs={'log_vars': {
'loss_cls': 1.111
}})
self.assertEqual(
message_hub.get_scalar('train/loss_cls').current(), 1.111)
def test_after_val_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_val_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_val_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('val/acc').current(), 0.8)
def test_after_test_epoch(self):
message_hub = MessageHub.get_instance(
'runtime_info_hook_test_after_test_epoch')
runner = Mock()
runner.message_hub = message_hub
hook = RuntimeInfoHook()
hook.after_test_epoch(runner, metrics={'acc': 0.8})
self.assertEqual(message_hub.get_scalar('test/acc').current(), 0.8)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
config: CLIPConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
self.special_care_embeds = self.param(
"special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
)
self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
def __call__(self, clip_input):
pooled_output = self.vision_model(clip_input)[1]
image_embeds = self.visual_projection(pooled_output)
special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
adjustment = 0.0
special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
special_scores = jnp.round(special_scores, 3)
is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
# Use a lower threshold if an image has any special care concept
special_adjustment = is_special_care * 0.01
concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
concept_scores = jnp.round(concept_scores, 3)
has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
config_class = CLIPConfig
main_input_name = "clip_input"
module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__(
self,
config: CLIPConfig,
input_shape: Optional[Tuple] = None,
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
if input_shape is None:
input_shape = (1, 224, 224, 3)
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensor
clip_input = jax.random.normal(rng, input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
random_params = self.module.init(rngs, clip_input)["params"]
return random_params
def __call__(
self,
clip_input,
params: dict = None,
):
clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
return self.module.apply(
{"params": params or self.params},
jnp.array(clip_input, dtype=jnp.float32),
rngs={},
)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
config: CLIPConfig
dtype: jnp.dtype = jnp.float32
def setup(self):
self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
self.special_care_embeds = self.param(
"special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
)
self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
def __call__(self, clip_input):
pooled_output = self.vision_model(clip_input)[1]
image_embeds = self.visual_projection(pooled_output)
special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
adjustment = 0.0
special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
special_scores = jnp.round(special_scores, 3)
is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
# Use a lower threshold if an image has any special care concept
special_adjustment = is_special_care * 0.01
concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
concept_scores = jnp.round(concept_scores, 3)
has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
config_class = CLIPConfig
main_input_name = "clip_input"
module_class = FlaxStableDiffusionSafetyCheckerModule
def __init__(
self,
config: CLIPConfig,
input_shape: Optional[Tuple] = None,
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
if input_shape is None:
input_shape = (1, 224, 224, 3)
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.Array, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensor
clip_input = jax.random.normal(rng, input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
random_params = self.module.init(rngs, clip_input)["params"]
return random_params
def __call__(
self,
clip_input,
params: dict = None,
):
clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
return self.module.apply(
{"params": params or self.params},
jnp.array(clip_input, dtype=jnp.float32),
rngs={},
)
|
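# --- Small numeric check of `jax_cosine_distance` defined above (assumes the
# function is in scope, e.g. copied into a REPL). Rows are L2-normalised
# before the matmul, so the result is a cosine-similarity matrix.
import jax.numpy as jnp

emb_a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
emb_b = jnp.array([[1.0, 0.0]])
print(jax_cosine_distance(emb_a, emb_b))  # expected ~[[1.], [0.]]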
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='the UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
gp.add_argument(
'--exit-on-exceptions',
type=str,
default=[],
nargs='*',
help='List of exceptions that will cause the Executor to shut down.',
)
|
"""Base argparser module for Pod and Deployment runtime"""
import argparse
import os
from jina.enums import PollingType
from jina.helper import random_identity
from jina.parsers.helper import _SHOW_ALL_ARGS, add_arg_group
def mixin_essential_parser(parser):
"""Mixing in arguments required by every module into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Essential')
gp.add_argument(
'--name',
type=str,
help='''
The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
''',
)
gp.add_argument(
'--workspace',
type=str,
default=None,
help='The working directory for any IO operations in this object. '
'If not set, then derive from its parent `workspace`.',
)
gp.add_argument(
'--log-config',
type=str,
default='default',
help='The YAML config of the logger used in this object.',
)
gp.add_argument(
'--quiet',
action='store_true',
default=False,
help='If set, then no log will be emitted from this object.',
)
gp.add_argument(
'--quiet-error',
action='store_true',
default=False,
help='If set, then exception stack information will not be added to the log',
)
gp.add_argument(
'--workspace-id',
type=str,
default=random_identity(),
help='the UUID for identifying the workspace. When not given, a random id will be assigned. '
'Multiple Pod/Deployment/Flow will work under the same workspace if they share the same '
'`workspace-id`.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
def mixin_base_ppr_parser(parser):
"""Mixing in arguments required by pod/deployment/runtime module into the given parser.
:param parser: the parser instance to which we add arguments
"""
mixin_essential_parser(parser)
gp = add_arg_group(parser, title='Base Deployment')
gp.add_argument(
'--extra-search-paths',
type=str,
default=[],
nargs='*',
help='Extra search paths to be used when loading modules and finding YAML config files.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--timeout-ctrl',
type=int,
default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),
help='The timeout in milliseconds of the control request, -1 for waiting forever',
)
parser.add_argument(
'--k8s-namespace',
type=str,
help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--polling',
type=str,
default=PollingType.ANY.name,
help='''
The polling strategy of the Deployment and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Deployment or by endpoint.
Define per Deployment:
- ANY: only one (whoever is idle) Pod polls the message
- ALL: all Pods poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
''',
)
|
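# --- Illustrative sketch of the mixins above applied to a plain argparse
# parser (the import path is an assumption; the CLI values are examples).
import argparse
from jina.parsers.orchestrate.base import mixin_base_ppr_parser

parser = argparse.ArgumentParser()
mixin_base_ppr_parser(parser)  # also mixes in the Essential argument group
args = parser.parse_args(['--name', 'encoder', '--polling', 'ALL'])
print(args.name, args.polling, args.timeout_ctrl)  # encoder ALL 60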
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super().__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> CLIPModel:
return CLIPModel(model_name=input_path)
|
from __future__ import annotations
import torch
import transformers
from PIL import Image
from torch import nn
class CLIPModel(nn.Module):
def __init__(self, model_name: str = "openai/clip-vit-base-patch32", processor_name=None) -> None:
super(CLIPModel, self).__init__()
if processor_name is None:
processor_name = model_name
self.model = transformers.CLIPModel.from_pretrained(model_name)
self.processor = transformers.CLIPProcessor.from_pretrained(processor_name)
def __repr__(self) -> str:
return "CLIPModel()"
def forward(self, features: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
image_embeds = []
text_embeds = []
if "pixel_values" in features:
vision_outputs = self.model.vision_model(pixel_values=features["pixel_values"])
image_embeds = self.model.visual_projection(vision_outputs[1])
if "input_ids" in features:
text_outputs = self.model.text_model(
input_ids=features.get("input_ids"),
attention_mask=features.get("attention_mask", None),
position_ids=features.get("position_ids", None),
output_attentions=features.get("output_attentions", None),
output_hidden_states=features.get("output_hidden_states", None),
)
text_embeds = self.model.text_projection(text_outputs[1])
sentence_embedding = []
image_features = iter(image_embeds)
text_features = iter(text_embeds)
for idx, input_type in enumerate(features["image_text_info"]):
if input_type == 0:
sentence_embedding.append(next(image_features))
else:
sentence_embedding.append(next(text_features))
features["sentence_embedding"] = torch.stack(sentence_embedding).float()
return features
def tokenize(self, texts, padding: str | bool = True) -> dict[str, torch.Tensor]:
images = []
texts_values = []
image_text_info = []
for idx, data in enumerate(texts):
if isinstance(data, Image.Image): # An Image
images.append(data)
image_text_info.append(0)
else: # A text
texts_values.append(data)
image_text_info.append(1)
encoding = {}
if len(texts_values):
encoding = self.processor.tokenizer(texts_values, return_tensors="pt", padding=padding)
if len(images):
image_features = self.processor.image_processor(images, return_tensors="pt")
encoding["pixel_values"] = image_features.pixel_values
encoding["image_text_info"] = image_text_info
return dict(encoding)
@property
def tokenizer(self) -> transformers.CLIPProcessor:
return self.processor
def save(self, output_path: str) -> None:
self.model.save_pretrained(output_path)
self.processor.save_pretrained(output_path)
@staticmethod
def load(input_path: str) -> "CLIPModel":
return CLIPModel(model_name=input_path)
|
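# --- Hedged usage sketch: the CLIPModel wrapper above is normally driven
# through SentenceTransformer; the checkpoint name and image path below are
# examples only.
from PIL import Image
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('clip-ViT-B-32')
embeddings = model.encode([Image.open('cat.jpg'), 'a photo of a cat'])
print(embeddings.shape)  # one embedding row per image/text input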
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils.misc import is_tf_available, is_torch_available
T = TypeVar('T', bound='ImageDoc')
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
class ImageDoc(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(url="http://www.jina.ai/image.jpg"),
text=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes_: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch_available and isinstance(value, torch.Tensor))
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes_=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_document import BaseDocument
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils.misc import is_tf_available, is_torch_available
T = TypeVar('T', bound='ImageDoc')
torch_available = is_torch_available()
if torch_available:
import torch
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
class ImageDoc(BaseDocument):
"""
Document for handling images.
It can contain an ImageUrl (`Image.url`), an AnyTensor (`Image.tensor`),
and an AnyEmbedding (`Image.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
You can extend this Document:
.. code-block:: python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(url='http://www.jina.ai/image.jpg')
image.tensor = image.url.load()
model = MyEmbeddingModel()
image.embedding = model(image.tensor)
image.second_embedding = model(image.tensor)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDocument):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(url="http://www.jina.ai/image.jpg"),
text=TextDoc(text="hello world, how are you doing?"),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes.load()
"""
url: Optional[ImageUrl]
tensor: Optional[ImageTensor]
embedding: Optional[AnyEmbedding]
bytes: Optional[ImageBytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch_available and isinstance(value, torch.Tensor))
or (tf_available and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes=value)
return super().validate(value)
|
import csv
import logging
import os
from typing import List
from scipy.stats import pearsonr, spearmanr
from sentence_transformers import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
it computes the Pearson and Spearman correlation between the predicted score for each sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: List[List[str]], scores: List[float], name: str = "", write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else "") + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else "w", encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
|
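A minimal usage sketch for the evaluator above, assuming a pretrained STS cross-encoder checkpoint ('cross-encoder/stsb-distilroberta-base') and a few toy sentence pairs:

from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator

# toy dev data: sentence pairs with gold similarity scores
dev_samples = [
    InputExample(texts=["A man is eating food.", "A man is eating a meal."], label=0.9),
    InputExample(texts=["A man is eating food.", "A plane is taking off."], label=0.1),
    InputExample(texts=["A woman is playing violin.", "A woman is playing guitar."], label=0.5),
]

model = CrossEncoder("cross-encoder/stsb-distilroberta-base")  # assumed checkpoint name
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
spearman = evaluator(model)  # returns the Spearman correlation; no CSV is written without output_path
print(spearman)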
import logging
from scipy.stats import pearsonr, spearmanr
from typing import List
import os
import csv
from ... import InputExample
logger = logging.getLogger(__name__)
class CECorrelationEvaluator:
"""
This evaluator can be used with the CrossEncoder class. Given sentence pairs and continuous scores,
it computes the Pearson and Spearman correlation between the predicted score for each sentence pair
and the gold score.
"""
def __init__(self, sentence_pairs: List[List[str]], scores: List[float], name: str='', write_csv: bool = True):
self.sentence_pairs = sentence_pairs
self.scores = scores
self.name = name
self.csv_file = "CECorrelationEvaluator" + ("_" + name if name else '') + "_results.csv"
self.csv_headers = ["epoch", "steps", "Pearson_Correlation", "Spearman_Correlation"]
self.write_csv = write_csv
@classmethod
def from_input_examples(cls, examples: List[InputExample], **kwargs):
sentence_pairs = []
scores = []
for example in examples:
sentence_pairs.append(example.texts)
scores.append(example.label)
return cls(sentence_pairs, scores, **kwargs)
def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
if epoch != -1:
if steps == -1:
out_txt = " after epoch {}:".format(epoch)
else:
out_txt = " in epoch {} after {} steps:".format(epoch, steps)
else:
out_txt = ":"
logger.info("CECorrelationEvaluator: Evaluating the model on " + self.name + " dataset" + out_txt)
pred_scores = model.predict(self.sentence_pairs, convert_to_numpy=True, show_progress_bar=False)
eval_pearson, _ = pearsonr(self.scores, pred_scores)
eval_spearman, _ = spearmanr(self.scores, pred_scores)
logger.info("Correlation:\tPearson: {:.4f}\tSpearman: {:.4f}".format(eval_pearson, eval_spearman))
if output_path is not None and self.write_csv:
csv_path = os.path.join(output_path, self.csv_file)
output_file_exists = os.path.isfile(csv_path)
with open(csv_path, mode="a" if output_file_exists else 'w', encoding="utf-8") as f:
writer = csv.writer(f)
if not output_file_exists:
writer.writerow(self.csv_headers)
writer.writerow([epoch, steps, eval_pearson, eval_spearman])
return eval_spearman
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl] = Field(
description='URL to a (potentially remote) image file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true',
default=None,
)
tensor: Optional[ImageTensor] = Field(
description='Tensor object of the image which can be specified as one of `ImageNdArray`, `ImageTorchTensor`, `ImageTensorflowTensor`.',
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the image.',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[ImageBytes] = Field(
description='Bytes object of the image which is an instance of `ImageBytes`.',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes_=value)
return super().validate(value)
|
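A small sketch of what the validate override above enables: when ImageDoc is used as a field of another document, a plain URL string or a raw tensor is coerced into an ImageDoc automatically (PhotoDoc and the toy data are illustrative, not part of docarray):

import numpy as np

from docarray import BaseDoc
from docarray.documents import ImageDoc


class PhotoDoc(BaseDoc):  # hypothetical wrapper document
    image: ImageDoc


# a plain URL string is coerced into ImageDoc(url=...)
doc = PhotoDoc(
    image='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
assert doc.image.url is not None

# a raw ndarray is coerced into ImageDoc(tensor=...)
doc = PhotoDoc(image=np.zeros((3, 32, 32)))
assert doc.image.tensor is not None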
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.typing import AnyEmbedding, ImageBytes, ImageUrl
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.image.image_tensor import ImageTensor
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='ImageDoc')
class ImageDoc(BaseDoc):
"""
Document for handling images.
It can contain:
- an [`ImageUrl`][docarray.typing.url.ImageUrl] (`Image.url`)
- an [`ImageTensor`](../../../api_references/typing/tensor/image) (`Image.tensor`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`Image.embedding`)
- an [`ImageBytes`][docarray.typing.bytes.ImageBytes] object (`ImageDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import ImageDoc
# use it directly
image = ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
```
You can extend this Document:
```python
from docarray.documents import ImageDoc
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyImage(ImageDoc):
second_embedding: Optional[AnyEmbedding]
image = MyImage(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
)
image.tensor = image.url.load()
# model = MyEmbeddingModel()
# image.embedding = model(image.tensor)
# image.second_embedding = model(image.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import ImageDoc, TextDoc
# compose it
class MultiModalDoc(BaseDoc):
image: ImageDoc
text: TextDoc
mmdoc = MultiModalDoc(
image=ImageDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/image-data/apple.png?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.image.tensor = mmdoc.image.url.load()
# or
mmdoc.image.bytes_ = mmdoc.image.url.load_bytes()
mmdoc.image.tensor = mmdoc.image.bytes_.load()
```
"""
url: Optional[ImageUrl] = None
tensor: Optional[ImageTensor] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[ImageBytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif (
isinstance(value, (AbstractTensor, np.ndarray))
or (torch is not None and isinstance(value, torch.Tensor))
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
elif isinstance(value, bytes):
value = cls(bytes_=value)
return super().validate(value)
|
_base_ = './faster-rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
|
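In the CIoU configs above and below, reg_decoded_bbox=True makes the bbox head compute its regression loss on decoded boxes rather than on box deltas, which IoU-based losses such as CIoU require. A minimal sketch for loading and inspecting such a config with MMEngine, assuming a hypothetical file path:

from mmengine.config import Config

# hypothetical path to the CIoU config shown above
cfg = Config.fromfile('configs/faster_rcnn/faster-rcnn_r50_fpn_ciou_1x_coco.py')
print(cfg.model.roi_head.bbox_head.loss_bbox)          # expected: {'type': 'CIoULoss', 'loss_weight': 12.0}
print(cfg.model.roi_head.bbox_head.reg_decoded_bbox)   # expected: True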
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
roi_head=dict(
bbox_head=dict(
reg_decoded_bbox=True,
loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
|
_base_ = '../mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)))
|