input (string, 33–5k chars) | output (string, 32–5k chars)
---|---
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**kwargs)
|
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**kwargs)
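Both versions register the class through mmdet's `DETECTORS` registry, which is what lets a config dict like `dict(type='SCNet', ...)` be built into a detector instance. Below is a minimal sketch of that registry pattern; the `Registry` class and `DummyDetector` are simplified stand-ins for illustration, not mmdet's actual implementation:

```python
# Simplified registry sketch (illustrative; mmdet's real Registry does much more).
class Registry:
    def __init__(self, name: str):
        self.name = name
        self._module_dict = {}

    def register_module(self):
        # Returns a class decorator that records the class under its name.
        def _register(cls):
            self._module_dict[cls.__name__] = cls
            return cls

        return _register

    def build(self, cfg: dict):
        # Look up the class named by 'type' and instantiate it with the rest.
        cfg = dict(cfg)
        cls = self._module_dict[cfg.pop('type')]
        return cls(**cfg)


DETECTORS = Registry('detector')


@DETECTORS.register_module()
class DummyDetector:
    def __init__(self, depth: int = 50):
        self.depth = depth


model = DETECTORS.build(dict(type='DummyDetector', depth=101))
assert model.depth == 101
```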
|
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composable_memory import SimpleComposableMemory
from llama_index.core.memory.memory import Memory, BaseMemoryBlock, InsertMethod
from llama_index.core.memory.memory_blocks import (
    StaticMemoryBlock,
    VectorMemoryBlock,
    FactExtractionMemoryBlock,
)

__all__ = [
    "BaseMemory",
    "Memory",
    "StaticMemoryBlock",
    "VectorMemoryBlock",
    "FactExtractionMemoryBlock",
    "BaseMemoryBlock",
    "InsertMethod",
    # Deprecated
    "ChatMemoryBuffer",
    "ChatSummaryMemoryBuffer",
    "SimpleComposableMemory",
    "VectorMemory",
]
|
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composable_memory import SimpleComposableMemory
from llama_index.core.memory.memory import Memory
from llama_index.core.memory.memory_blocks import (
    StaticMemoryBlock,
    VectorMemoryBlock,
    FactExtractionMemoryBlock,
)

__all__ = [
    "BaseMemory",
    "Memory",
    "StaticMemoryBlock",
    "VectorMemoryBlock",
    "FactExtractionMemoryBlock",
    # Deprecated
    "ChatMemoryBuffer",
    "ChatSummaryMemoryBuffer",
    "SimpleComposableMemory",
    "VectorMemory",
]
|
_CTC_DECODERS = [
    "CTCHypothesis",
    "CTCDecoder",
    "CTCDecoderLM",
    "CTCDecoderLMState",
    "ctc_decoder",
    "download_pretrained_files",
]


def __getattr__(name: str):
    if name in _CTC_DECODERS:
        try:
            from . import _ctc_decoder
        except Exception as err:
            raise RuntimeError(
                "The CTC decoder suite requires the flashlight-text package and optionally KenLM. Please install them."
            ) from err
        item = getattr(_ctc_decoder, name)
        globals()[name] = item
        return item
    raise AttributeError(f"module {__name__} has no attribute {name}")


def __dir__():
    return sorted(__all__)


__all__ = _CTC_DECODERS
|
_INITIALIZED = False
_LAZILY_IMPORTED = [
    "CTCHypothesis",
    "CTCDecoder",
    "CTCDecoderLM",
    "CTCDecoderLMState",
    "ctc_decoder",
    "download_pretrained_files",
]


def __getattr__(name: str):
    if name in _LAZILY_IMPORTED:
        try:
            from . import _ctc_decoder
        except ImportError as err:
            raise RuntimeError(
                "CTC decoder requires the decoder extension. Please set BUILD_CTC_DECODER=1 when building from source."
            ) from err
        item = getattr(_ctc_decoder, name)
        globals()[name] = item
        return item
    raise AttributeError(f"module {__name__} has no attribute {name}")


def __dir__():
    return sorted(__all__ + _LAZILY_IMPORTED)


__all__ = []
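Both variants rely on PEP 562 module-level `__getattr__`: the expensive `_ctc_decoder` import only happens on first attribute access, and the result is cached in `globals()` so later lookups never re-enter `__getattr__`. A self-contained sketch of the same pattern, with made-up module contents (`math` stands in for the heavy optional dependency):

```python
# lazy_mod.py -- hypothetical module demonstrating the lazy-import pattern.
_LAZILY_IMPORTED = ["sqrt", "pi"]


def __getattr__(name: str):
    # Only called when `name` is not already in the module's globals.
    if name in _LAZILY_IMPORTED:
        import math  # stand-in for an expensive or optional import

        item = getattr(math, name)
        globals()[name] = item  # cache so future lookups bypass __getattr__
        return item
    raise AttributeError(f"module {__name__} has no attribute {name}")


def __dir__():
    # Advertise the lazy names even before they are materialized.
    return sorted(list(globals()) + _LAZILY_IMPORTED)
```

Importing `lazy_mod` stays cheap; accessing `lazy_mod.sqrt` triggers the real import exactly once.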
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
    from pydantic.fields import BaseConfig, ModelField

    from docarray.proto import NodeProto

T = TypeVar('T', bound='VideoBytes')


class VideoLoadResult(NamedTuple):
    video: VideoNdArray
    audio: AudioNdArray
    key_frame_indices: NdArray


@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
    """
    Bytes that store a video and that can be loaded into a video tensor.
    """

    @classmethod
    def validate(
        cls: Type[T],
        value: Any,
        field: 'ModelField',
        config: 'BaseConfig',
    ) -> T:
        value = bytes_validator(value)
        return cls(value)

    @classmethod
    def from_protobuf(cls: Type[T], pb_msg: T) -> T:
        return parse_obj_as(cls, pb_msg)

    def _to_node_protobuf(self: T) -> 'NodeProto':
        from docarray.proto import NodeProto

        return NodeProto(blob=self, type=self._proto_type_name)

    def load(self, **kwargs) -> VideoLoadResult:
        """
        Load the video from the bytes into a VideoLoadResult object consisting of a
        VideoNdArray (`VideoLoadResult.video`), an AudioNdArray
        (`VideoLoadResult.audio`) and an NdArray containing the key frame indices
        (`VideoLoadResult.key_frame_indices`).

        ---

        ```python
        from docarray import BaseDoc
        from docarray.typing import AudioNdArray, NdArray, VideoNdArray, VideoUrl


        class MyDoc(BaseDoc):
            video_url: VideoUrl


        doc = MyDoc(
            video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
        )

        video, audio, key_frame_indices = doc.video_url.load()
        assert isinstance(video, VideoNdArray)
        assert isinstance(audio, AudioNdArray)
        assert isinstance(key_frame_indices, NdArray)
        ```

        ---

        :param kwargs: supports all keyword arguments supported by
            av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
        :return: a VideoLoadResult instance with video, audio and keyframe indices
        """
        if TYPE_CHECKING:
            import av
        else:
            av = import_library('av')

        with av.open(BytesIO(self), **kwargs) as container:
            audio_frames = []
            video_frames = []
            keyframe_indices = []
            for frame in container.decode():
                if type(frame) == av.audio.frame.AudioFrame:
                    audio_frames.append(frame.to_ndarray())
                elif type(frame) == av.video.frame.VideoFrame:
                    video_frames.append(frame.to_ndarray(format='rgb24'))
                    if frame.key_frame == 1:
                        curr_index = len(video_frames)
                        keyframe_indices.append(curr_index)

        if len(audio_frames) == 0:
            audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
        else:
            audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))

        video = parse_obj_as(VideoNdArray, np.stack(video_frames))
        indices = parse_obj_as(NdArray, keyframe_indices)

        return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
|
from io import BytesIO
from typing import TYPE_CHECKING, Any, NamedTuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor import AudioNdArray, NdArray, VideoNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
    from pydantic.fields import BaseConfig, ModelField

    from docarray.proto import NodeProto

T = TypeVar('T', bound='VideoBytes')


class VideoLoadResult(NamedTuple):
    video: VideoNdArray
    audio: AudioNdArray
    key_frame_indices: NdArray


@_register_proto(proto_type_name='video_bytes')
class VideoBytes(bytes, AbstractType):
    """
    Bytes that store a video and that can be loaded into a video tensor.
    """

    @classmethod
    def validate(
        cls: Type[T],
        value: Any,
        field: 'ModelField',
        config: 'BaseConfig',
    ) -> T:
        value = bytes_validator(value)
        return cls(value)

    @classmethod
    def from_protobuf(cls: Type[T], pb_msg: T) -> T:
        return parse_obj_as(cls, pb_msg)

    def _to_node_protobuf(self: T) -> 'NodeProto':
        from docarray.proto import NodeProto

        return NodeProto(blob=self, type=self._proto_type_name)

    def load(self, **kwargs) -> VideoLoadResult:
        """
        Load the video from the bytes into a VideoLoadResult object consisting of a
        VideoNdArray (`VideoLoadResult.video`), an AudioNdArray
        (`VideoLoadResult.audio`) and an NdArray containing the key frame indices
        (`VideoLoadResult.key_frame_indices`).

        ---

        ```python
        from docarray import BaseDoc
        from docarray.typing import VideoUrl
        import numpy as np


        class MyDoc(BaseDoc):
            video_url: VideoUrl


        doc = MyDoc(
            video_url='https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true'
        )

        video, audio, key_frame_indices = doc.video_url.load()
        assert isinstance(video, np.ndarray)
        assert isinstance(audio, np.ndarray)
        assert isinstance(key_frame_indices, np.ndarray)
        ```

        ---

        :param kwargs: supports all keyword arguments supported by
            av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
        :return: a VideoLoadResult instance with video, audio and keyframe indices
        """
        if TYPE_CHECKING:
            import av
        else:
            av = import_library('av')

        with av.open(BytesIO(self), **kwargs) as container:
            audio_frames = []
            video_frames = []
            keyframe_indices = []
            for frame in container.decode():
                if type(frame) == av.audio.frame.AudioFrame:
                    audio_frames.append(frame.to_ndarray())
                elif type(frame) == av.video.frame.VideoFrame:
                    video_frames.append(frame.to_ndarray(format='rgb24'))
                    if frame.key_frame == 1:
                        curr_index = len(video_frames)
                        keyframe_indices.append(curr_index)

        if len(audio_frames) == 0:
            audio = parse_obj_as(AudioNdArray, np.array(audio_frames))
        else:
            audio = parse_obj_as(AudioNdArray, np.stack(audio_frames))

        video = parse_obj_as(VideoNdArray, np.stack(video_frames))
        indices = parse_obj_as(NdArray, keyframe_indices)

        return VideoLoadResult(video=video, audio=audio, key_frame_indices=indices)
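Since `validate` only runs `bytes_validator`, a `VideoBytes` can also be built straight from raw file bytes instead of going through a `VideoUrl`. A small sketch, assuming a local `mov_bbb.mp4` exists (the file path is hypothetical, and the import location follows docarray v2 conventions):

```python
from pydantic import parse_obj_as

from docarray.typing.bytes.video_bytes import VideoBytes  # assumed module path

# Hypothetical local file; any container format PyAV can open works.
with open('mov_bbb.mp4', 'rb') as f:
    video_bytes = parse_obj_as(VideoBytes, f.read())

video, audio, key_frame_indices = video_bytes.load()
print(video.shape, audio.shape, key_frame_indices.shape)
```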
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import OBJ_MIMETYPE
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
    import trimesh

T = TypeVar('T', bound='Url3D')


@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
    """
    URL to a file containing 3D mesh or point cloud information.
    Can be a remote (web) URL or a local file path.
    """

    @classmethod
    def mime_type(cls) -> str:
        return OBJ_MIMETYPE

    def _load_trimesh_instance(
        self: T,
        force: Optional[str] = None,
        skip_materials: bool = True,
        trimesh_args: Optional[Dict[str, Any]] = None,
    ) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
        """
        Load the data from the url into a trimesh.Trimesh or trimesh.Scene object.

        :param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
            For 'scene' try to coerce everything into a scene.
        :param skip_materials: skip loading materials if True, else load them.
        :param trimesh_args: dictionary of additional arguments for `trimesh.load()`
            or `trimesh.load_remote()`.
        :return: trimesh.Trimesh or trimesh.Scene object
        """
        import urllib.parse

        if TYPE_CHECKING:
            import trimesh
        else:
            trimesh = import_library('trimesh', raise_error=True)

        if not trimesh_args:
            trimesh_args = {}

        scheme = urllib.parse.urlparse(self).scheme
        loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load

        mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
        return mesh
|
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Optional, TypeVar, Union
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
    import trimesh

T = TypeVar('T', bound='Url3D')


@_register_proto(proto_type_name='url3d')
class Url3D(AnyUrl, ABC):
    """
    URL to a file containing 3D mesh or point cloud information.
    Can be a remote (web) URL or a local file path.
    """

    def _load_trimesh_instance(
        self: T,
        force: Optional[str] = None,
        skip_materials: bool = True,
        trimesh_args: Optional[Dict[str, Any]] = None,
    ) -> Union['trimesh.Trimesh', 'trimesh.Scene']:
        """
        Load the data from the url into a trimesh.Trimesh or trimesh.Scene object.

        :param force: str or None. For 'mesh' try to coerce scenes into a single mesh.
            For 'scene' try to coerce everything into a scene.
        :param skip_materials: skip loading materials if True, else load them.
        :param trimesh_args: dictionary of additional arguments for `trimesh.load()`
            or `trimesh.load_remote()`.
        :return: trimesh.Trimesh or trimesh.Scene object
        """
        import urllib.parse

        if TYPE_CHECKING:
            import trimesh
        else:
            trimesh = import_library('trimesh', raise_error=True)

        if not trimesh_args:
            trimesh_args = {}

        scheme = urllib.parse.urlparse(self).scheme
        loader = trimesh.load_remote if scheme in ['http', 'https'] else trimesh.load

        mesh = loader(self, force=force, skip_materials=skip_materials, **trimesh_args)
        return mesh
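The only branching in `_load_trimesh_instance` is the loader choice: `http`/`https` URLs go through `trimesh.load_remote`, everything else through `trimesh.load`. That dispatch can be sketched standalone, without trimesh installed (`pick_loader` is an illustrative helper, not part of docarray):

```python
from urllib.parse import urlparse


def pick_loader(url: str) -> str:
    # Mirrors the scheme-based dispatch in _load_trimesh_instance.
    scheme = urlparse(url).scheme
    return 'load_remote' if scheme in ['http', 'https'] else 'load'


assert pick_loader('https://example.com/mesh.obj') == 'load_remote'
assert pick_loader('/local/path/mesh.obj') == 'load'
```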
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend import Variable
from keras.src.backend import device
from keras.src.backend import name_scope
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api import activations
from keras.api import applications
from keras.api import callbacks
from keras.api import config
from keras.api import constraints
from keras.api import datasets
from keras.api import distribution
from keras.api import dtype_policies
from keras.api import export
from keras.api import initializers
from keras.api import legacy
from keras.api import mixed_precision
from keras.api import models
from keras.api import ops
from keras.api import optimizers
from keras.api import quantizers
from keras.api import random
from keras.api import regularizers
from keras.api import tree
from keras.api import utils
from keras.api._tf_keras.keras import backend
from keras.api._tf_keras.keras import layers
from keras.api._tf_keras.keras import losses
from keras.api._tf_keras.keras import metrics
from keras.api._tf_keras.keras import preprocessing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.exports import Variable
from keras.src.backend.exports import device
from keras.src.backend.exports import name_scope
from keras.src.dtype_policies.dtype_policy import DTypePolicy
from keras.src.dtype_policies.dtype_policy import FloatDTypePolicy
from keras.src.initializers.initializer import Initializer
from keras.src.layers.core.input_layer import Input
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.losses.loss import Loss
from keras.src.metrics.metric import Metric
from keras.src.models.model import Model
from keras.src.models.sequential import Sequential
from keras.src.ops.function import Function
from keras.src.ops.operation import Operation
from keras.src.optimizers.optimizer import Optimizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.regularizers.regularizers import Regularizer
from keras.src.version import __version__
from keras.src.version import version
|
"""
Given a dataset with parallel sentences, one "english" column and one "non_english" column, this script evaluates a model on the translation task.
Given a sentence in the "english" column, the model should find the correct translation in the "non_english" column, based on just the embeddings.
It then computes an accuracy over all possible source sentences src_i. Equivalently, it computes also the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool with sentences.
Good options for datasets are:
* sentence-transformers/parallel-sentences-wikimatrix
* sentence-transformers/parallel-sentences-tatoeba
* sentence-transformers/parallel-sentences-talks
As these have development sets.
Usage:
python examples/evaluation/evaluation_translation_matching.py [model_name_or_path] [dataset_name] [subset1] [subset2] ...
For example:
python examples/evaluation/evaluation_translation_matching.py distiluse-base-multilingual-cased sentence-transformers/parallel-sentences-tatoeba en-ar en-de en-nl
"""
import logging
import sys
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, evaluation
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1]
dataset_name = sys.argv[2]
subsets = sys.argv[3:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for subset in subsets:
dataset = load_dataset(dataset_name, subset)
datasets = {}
if dataset.column_names == ["train"]:
num_samples = min(5000, len(dataset["train"]))
datasets[f"train[:{num_samples}]"].append(dataset["train"].select(range(num_samples)))
else:
for split, sub_dataset in dataset.items():
if split != "train":
datasets[split] = sub_dataset
for split, sub_dataset in datasets.items():
logging.info(f"{dataset_name}, subset={subset}, split={split}, num_samples={len(sub_dataset)}")
translation_evaluator = evaluation.TranslationEvaluator(
sub_dataset["english"],
sub_dataset["non_english"],
name=f"{dataset_name}-{subset}-{split}",
batch_size=inference_batch_size,
)
translation_evaluator(model)
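The same evaluation also works without the script, by handing two aligned sentence lists directly to `TranslationEvaluator`. A toy sketch (a realistic pool should be far larger for the accuracy to mean much, and the exact return value depends on the installed sentence-transformers version):

```python
from sentence_transformers import SentenceTransformer, evaluation

model = SentenceTransformer("distiluse-base-multilingual-cased")

# Toy parallel data: source_sentences[i] must be the translation pair of target_sentences[i].
source_sentences = ["Hello world", "How are you?", "The weather is nice."]
target_sentences = ["Hallo Welt", "Wie geht es dir?", "Das Wetter ist schön."]

evaluator = evaluation.TranslationEvaluator(
    source_sentences, target_sentences, name="toy-en-de", batch_size=32
)
print(evaluator(model))  # matching accuracy in both directions
```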
|
"""
Given a tab separated file (.tsv) with parallel sentences, where the second column is the translation of the sentence in the first column, for example, in the format:
src1 trg1
src2 trg2
...
where trg_i is the translation of src_i.
Given src_i, the TranslationEvaluator checks which trg_j has the highest similarity using cosine similarity. If i == j, we assume
a match, i.e., the correct translation has been found for src_i out of all possible target sentences.
It then computes an accuracy over all possible source sentences src_i. Equivalently, it computes also the accuracy for the other direction.
A high accuracy score indicates that the model is able to find the correct translation out of a large pool with sentences.
Usage:
python [model_name_or_path] [parallel-file1] [parallel-file2] ...
For example:
python distiluse-base-multilingual-cased talks-en-de.tsv.gz
See the training_multilingual/get_parallel_data_...py scripts for getting parallel sentence data from different sources
"""
from sentence_transformers import SentenceTransformer, evaluation, LoggingHandler
import sys
import gzip
import os
import logging
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
model_name = sys.argv[1]
filepaths = sys.argv[2:]
inference_batch_size = 32
model = SentenceTransformer(model_name)
for filepath in filepaths:
src_sentences = []
trg_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, "r", encoding="utf8"
) as fIn:
for line in fIn:
splits = line.strip().split("\t")
if len(splits) >= 2:
src_sentences.append(splits[0])
trg_sentences.append(splits[1])
logger.info(os.path.basename(filepath) + ": " + str(len(src_sentences)) + " sentence pairs")
dev_trans_acc = evaluation.TranslationEvaluator(
src_sentences, trg_sentences, name=os.path.basename(filepath), batch_size=inference_batch_size
)
dev_trans_acc(model)
|
"""String output parser."""
from typing import Optional as Optional
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
    """OutputParser that parses LLMResult into the top likely string."""

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """StrOutputParser is serializable.

        Returns:
            True
        """
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object.

        Default is ["langchain", "schema", "output_parser"].
        """
        return ["langchain", "schema", "output_parser"]

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
        return "default"

    def parse(self, text: str) -> str:
        """Returns the input text with no changes."""
        return text


StrOutputParser.model_rebuild()
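Because `parse` is the identity function, `StrOutputParser` is mostly useful as the final step of a chain, coercing model output into a plain string (e.g. `prompt | llm | StrOutputParser()`). Direct usage:

```python
from langchain_core.output_parsers import StrOutputParser

parser = StrOutputParser()
assert parser.parse("hello") == "hello"
# As a Runnable, it can also be invoked directly:
assert parser.invoke("hello") == "hello"
```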
|
from typing import Optional as Optional
from langchain_core.output_parsers.transform import BaseTransformOutputParser
class StrOutputParser(BaseTransformOutputParser[str]):
    """OutputParser that parses LLMResult into the top likely string."""

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "output_parser"]

    @property
    def _type(self) -> str:
        """Return the output parser type for serialization."""
        return "default"

    def parse(self, text: str) -> str:
        """Returns the input text with no changes."""
        return text


StrOutputParser.model_rebuild()
|
from __future__ import annotations
import json
import logging
import time
from typing import List, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl, validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class SpeechToTextInput(BaseModel):
    query: HttpUrl = Field(description="url of the audio to analyze")


class EdenAiSpeechToTextTool(EdenaiTool):
    """Tool that queries the Eden AI Speech To Text API.

    For API reference, check the Eden AI documentation:
    https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_speech_to_text"
    description: str = (
        "A wrapper around edenai Services speech to text. "
        "Useful for when you have to convert audio to text. "
        "Input should be a url to an audio file."
    )

    args_schema: Type[BaseModel] = SpeechToTextInput
    is_async: bool = True

    language: Optional[str] = "en"
    speakers: Optional[int]
    profanity_filter: bool = False
    custom_vocabulary: Optional[List[str]]

    feature: str = "audio"
    subfeature: str = "speech_to_text_async"
    base_url: str = "https://api.edenai.run/v2/audio/speech_to_text_async/"

    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers results.
        Therefore we only allow one provider.
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    def _wait_processing(self, url: str) -> requests.Response:
        for _ in range(10):
            time.sleep(1)
            audio_analysis_result = self._get_edenai(url)
            temp = audio_analysis_result.json()
            if temp["status"] == "finished":
                if temp["results"][self.providers[0]]["error"] is not None:
                    raise Exception(
                        f"""EdenAI returned an unexpected response
                        {temp["results"][self.providers[0]]["error"]}"""
                    )
                else:
                    return audio_analysis_result
        raise Exception("Eden AI speech-to-text job processing timed out.")

    def _parse_response(self, response: dict) -> str:
        return response["public_id"]

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "file_url": query,
            "language": self.language,
            "speakers": self.speakers,
            "profanity_filter": self.profanity_filter,
            "custom_vocabulary": self.custom_vocabulary,
        }

        # filter out the values that are `None` so we don't send them to the API
        query_params = {k: v for k, v in all_params.items() if v is not None}

        job_id = self._call_eden_ai(query_params)
        url = self.base_url + job_id
        audio_analysis_result = self._wait_processing(url)
        result = audio_analysis_result.text
        formatted_text = json.loads(result)
        return formatted_text["results"][self.providers[0]]["text"]
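A construction-only sketch: `providers` comes from the `EdenaiTool` base class and is capped at one entry by the validator above. The provider name and audio URL below are placeholders, `EDENAI_API_KEY` must be set in the environment, and whether the other optional fields need explicit values depends on the installed pydantic version:

```python
# Hypothetical usage; no request is sent until the tool is run.
tool = EdenAiSpeechToTextTool(providers=["google"])  # exactly one provider

# Running it would submit the async job and poll for up to ~10 seconds:
# transcript = tool.run("https://example.com/sample-audio.mp3")
```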
|
from __future__ import annotations
import json
import logging
import time
from typing import List, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl, validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class SpeechToTextInput(BaseModel):
    query: HttpUrl = Field(description="url of the audio to analyze")


class EdenAiSpeechToTextTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Speech To Text API.

    For API reference, check the Eden AI documentation:
    https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_speech_to_text"
    description: str = (
        "A wrapper around edenai Services speech to text. "
        "Useful for when you have to convert audio to text. "
        "Input should be a url to an audio file."
    )

    args_schema: Type[BaseModel] = SpeechToTextInput
    is_async: bool = True

    language: Optional[str] = "en"
    speakers: Optional[int]
    profanity_filter: bool = False
    custom_vocabulary: Optional[List[str]]

    feature: str = "audio"
    subfeature: str = "speech_to_text_async"
    base_url: str = "https://api.edenai.run/v2/audio/speech_to_text_async/"

    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers results.
        Therefore we only allow one provider.
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    def _wait_processing(self, url: str) -> requests.Response:
        for _ in range(10):
            time.sleep(1)
            audio_analysis_result = self._get_edenai(url)
            temp = audio_analysis_result.json()
            if temp["status"] == "finished":
                if temp["results"][self.providers[0]]["error"] is not None:
                    raise Exception(
                        f"""EdenAI returned an unexpected response
                        {temp["results"][self.providers[0]]["error"]}"""
                    )
                else:
                    return audio_analysis_result
        raise Exception("Eden AI speech-to-text job processing timed out.")

    def _parse_response(self, response: dict) -> str:
        return response["public_id"]

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "file_url": query,
            "language": self.language,
            "speakers": self.speakers,
            "profanity_filter": self.profanity_filter,
            "custom_vocabulary": self.custom_vocabulary,
        }

        # filter out the values that are `None` so we don't send them to the API
        query_params = {k: v for k, v in all_params.items() if v is not None}

        job_id = self._call_eden_ai(query_params)
        url = self.base_url + job_id
        audio_analysis_result = self._wait_processing(url)
        result = audio_analysis_result.text
        formatted_text = json.loads(result)
        return formatted_text["results"][self.providers[0]]["text"]
|
"""Select examples based on length."""
import re
from typing import Callable
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.prompts.prompt import PromptTemplate
def _get_length_based(text: str) -> int:
return len(re.split("\n| ", text))
class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
    """Select examples based on length."""

    examples: list[dict]
    """A list of the examples that the prompt template expects."""

    example_prompt: PromptTemplate
    """Prompt template used to format the examples."""

    get_text_length: Callable[[str], int] = _get_length_based
    """Function to measure prompt length. Defaults to word count."""

    max_length: int = 2048
    """Max length for the prompt, beyond which examples are cut."""

    example_text_lengths: list[int] = Field(default_factory=list)  # :meta private:
    """Length of each example."""

    def add_example(self, example: dict[str, str]) -> None:
        """Add new example to list.

        Args:
            example: A dictionary with keys as input variables
                and values as their values.
        """
        self.examples.append(example)
        string_example = self.example_prompt.format(**example)
        self.example_text_lengths.append(self.get_text_length(string_example))

    async def aadd_example(self, example: dict[str, str]) -> None:
        """Async add new example to list.

        Args:
            example: A dictionary with keys as input variables
                and values as their values.
        """
        self.add_example(example)

    @model_validator(mode="after")
    def post_init(self) -> Self:
        """Validate that the examples are formatted correctly."""
        if self.example_text_lengths:
            return self
        string_examples = [self.example_prompt.format(**eg) for eg in self.examples]
        self.example_text_lengths = [self.get_text_length(eg) for eg in string_examples]
        return self

    def select_examples(self, input_variables: dict[str, str]) -> list[dict]:
        """Select which examples to use based on the input lengths.

        Args:
            input_variables: A dictionary with keys as input variables
                and values as their values.

        Returns:
            A list of examples to include in the prompt.
        """
        inputs = " ".join(input_variables.values())
        remaining_length = self.max_length - self.get_text_length(inputs)
        i = 0
        examples = []
        while remaining_length > 0 and i < len(self.examples):
            new_length = remaining_length - self.example_text_lengths[i]
            if new_length < 0:
                break
            examples.append(self.examples[i])
            remaining_length = new_length
            i += 1
        return examples

    async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]:
        """Async select which examples to use based on the input lengths.

        Args:
            input_variables: A dictionary with keys as input variables
                and values as their values.

        Returns:
            A list of examples to include in the prompt.
        """
        return self.select_examples(input_variables)
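A short usage sketch: each example is rendered with `example_prompt`, its word count is cached, and `select_examples` then packs examples front-to-back until the `max_length` word budget (minus the input's own length) runs out:

```python
from langchain_core.example_selectors import LengthBasedExampleSelector
from langchain_core.prompts.prompt import PromptTemplate

examples = [
    {"input": "happy", "output": "sad"},
    {"input": "tall", "output": "short"},
    {"input": "energetic", "output": "lethargic"},
]
example_prompt = PromptTemplate.from_template("Input: {input}\nOutput: {output}")

selector = LengthBasedExampleSelector(
    examples=examples,
    example_prompt=example_prompt,
    max_length=10,  # measured in words by the default _get_length_based
)
# Only the examples that fit in the remaining budget are returned, in order
# (here: the first two, since each rendered example is 4 words long).
print(selector.select_examples({"input": "big"}))
```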
|
"""Select examples based on length."""
import re
from typing import Callable
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from langchain_core.example_selectors.base import BaseExampleSelector
from langchain_core.prompts.prompt import PromptTemplate
def _get_length_based(text: str) -> int:
return len(re.split("\n| ", text))
class LengthBasedExampleSelector(BaseExampleSelector, BaseModel):
    """Select examples based on length."""

    examples: list[dict]
    """A list of the examples that the prompt template expects."""

    example_prompt: PromptTemplate
    """Prompt template used to format the examples."""

    get_text_length: Callable[[str], int] = _get_length_based
    """Function to measure prompt length. Defaults to word count."""

    max_length: int = 2048
    """Max length for the prompt, beyond which examples are cut."""

    example_text_lengths: list[int] = Field(default_factory=list)  # :meta private:
    """Length of each example."""

    def add_example(self, example: dict[str, str]) -> None:
        """Add new example to list.

        Args:
            example: A dictionary with keys as input variables
                and values as their values.
        """
        self.examples.append(example)
        string_example = self.example_prompt.format(**example)
        self.example_text_lengths.append(self.get_text_length(string_example))

    async def aadd_example(self, example: dict[str, str]) -> None:
        """Async add new example to list.

        Args:
            example: A dictionary with keys as input variables
                and values as their values.
        """
        self.add_example(example)

    @model_validator(mode="after")
    def post_init(self) -> Self:
        """Validate that the examples are formatted correctly."""
        if self.example_text_lengths:
            return self
        string_examples = [self.example_prompt.format(**eg) for eg in self.examples]
        self.example_text_lengths = [self.get_text_length(eg) for eg in string_examples]
        return self

    def select_examples(self, input_variables: dict[str, str]) -> list[dict]:
        """Select which examples to use based on the input lengths.

        Args:
            input_variables: A dictionary with keys as input variables
                and values as their values.

        Returns:
            A list of examples to include in the prompt.
        """
        inputs = " ".join(input_variables.values())
        remaining_length = self.max_length - self.get_text_length(inputs)
        i = 0
        examples = []
        while remaining_length > 0 and i < len(self.examples):
            new_length = remaining_length - self.example_text_lengths[i]
            if new_length < 0:
                break
            else:
                examples.append(self.examples[i])
                remaining_length = new_length
                i += 1
        return examples

    async def aselect_examples(self, input_variables: dict[str, str]) -> list[dict]:
        """Async select which examples to use based on the input lengths.

        Args:
            input_variables: A dictionary with keys as input variables
                and values as their values.

        Returns:
            A list of examples to include in the prompt.
        """
        return self.select_examples(input_variables)
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
@pytest.mark.proto
def test_proto_tensor():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    tensor._to_node_protobuf()


def test_json_schema():
    schema_json_of(TorchTensor)


def test_dump_json():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    orjson_dumps(tensor)


def test_unwrap():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    ndarray = tensor.unwrap()
    assert not isinstance(ndarray, TorchTensor)
    assert isinstance(tensor, TorchTensor)
    assert isinstance(ndarray, torch.Tensor)
    assert tensor.data_ptr() == ndarray.data_ptr()
    assert (ndarray == torch.zeros(3, 224, 224)).all()


def test_parametrized():
    # correct shape, single axis
    tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (128,)

    # correct shape, multiple axis
    tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    # wrong but reshapable shape
    tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    # wrong and not reshapable shape
    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))

    # test independent variable dimensions
    tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 60, 128)

    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))

    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))

    # test dependent variable dimensions
    tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    with pytest.raises(ValueError):
        _ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))

    with pytest.raises(ValueError):
        _ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))


@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
    MyTT = TorchTensor[3, 224, 224]
    tensor = parse_obj_as(MyTT, torch.zeros(shape))

    assert MyTT.__name__ == 'TorchTensor[3, 224, 224]'
    assert MyTT.__qualname__ == 'TorchTensor[3, 224, 224]'

    assert tensor.__class__.__name__ == 'TorchTensor'
    assert tensor.__class__.__qualname__ == 'TorchTensor'
    assert f'{tensor[0][0][0]}' == 'TorchTensor(0.)'


def test_torch_embedding():
    # correct shape
    tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
    assert isinstance(tensor, TorchEmbedding)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (128,)

    # wrong shape at data setting time
    with pytest.raises(ValueError):
        parse_obj_as(TorchEmbedding[128], torch.zeros(256))

    # illegal shape at class creation time
    with pytest.raises(ValueError):
        parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))


def test_parametrized_subclass():
    c1 = TorchTensor[128]
    c2 = TorchTensor[128]
    assert issubclass(c1, c2)
    assert issubclass(c1, TorchTensor)
    assert issubclass(c1, torch.Tensor)

    assert not issubclass(c1, TorchTensor[256])


def test_parametrized_instance():
    t = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert isinstance(t, TorchTensor[128])
    assert isinstance(t, TorchTensor)
    assert isinstance(t, torch.Tensor)

    assert not isinstance(t, TorchTensor[256])
    assert not isinstance(t, TorchTensor[2, 128])
    assert not isinstance(t, TorchTensor[2, 2, 64])


def test_parametrized_equality():
    t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert (t1 == t2).all()


def test_parametrized_operations():
    t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t_result = t1 + t2
    assert isinstance(t_result, torch.Tensor)
    assert isinstance(t_result, TorchTensor)
    assert isinstance(t_result, TorchTensor[128])
|
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import TorchEmbedding, TorchTensor
def test_proto_tensor():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    tensor._to_node_protobuf()


def test_json_schema():
    schema_json_of(TorchTensor)


def test_dump_json():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    orjson_dumps(tensor)


def test_unwrap():
    tensor = parse_obj_as(TorchTensor, torch.zeros(3, 224, 224))
    ndarray = tensor.unwrap()
    assert not isinstance(ndarray, TorchTensor)
    assert isinstance(tensor, TorchTensor)
    assert isinstance(ndarray, torch.Tensor)
    assert tensor.data_ptr() == ndarray.data_ptr()
    assert (ndarray == torch.zeros(3, 224, 224)).all()


def test_parametrized():
    # correct shape, single axis
    tensor = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (128,)

    # correct shape, multiple axis
    tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    # wrong but reshapable shape
    tensor = parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 3, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    # wrong and not reshapable shape
    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 224, 224], torch.zeros(224, 224))

    # test independent variable dimensions
    tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    tensor = parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(3, 60, 128))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 60, 128)

    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(4, 224, 224))

    with pytest.raises(ValueError):
        parse_obj_as(TorchTensor[3, 'x', 'y'], torch.zeros(100, 1))

    # test dependent variable dimensions
    tensor = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 224, 224))
    assert isinstance(tensor, TorchTensor)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (3, 224, 224)

    with pytest.raises(ValueError):
        _ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60, 128))

    with pytest.raises(ValueError):
        _ = parse_obj_as(TorchTensor[3, 'x', 'x'], torch.zeros(3, 60))


@pytest.mark.parametrize('shape', [(3, 224, 224), (224, 224, 3)])
def test_parameterized_tensor_class_name(shape):
    MyTT = TorchTensor[3, 224, 224]
    tensor = parse_obj_as(MyTT, torch.zeros(shape))

    assert MyTT.__name__ == 'TorchTensor[3, 224, 224]'
    assert MyTT.__qualname__ == 'TorchTensor[3, 224, 224]'

    assert tensor.__class__.__name__ == 'TorchTensor'
    assert tensor.__class__.__qualname__ == 'TorchTensor'
    assert f'{tensor[0][0][0]}' == 'TorchTensor(0.)'


def test_torch_embedding():
    # correct shape
    tensor = parse_obj_as(TorchEmbedding[128], torch.zeros(128))
    assert isinstance(tensor, TorchEmbedding)
    assert isinstance(tensor, torch.Tensor)
    assert tensor.shape == (128,)

    # wrong shape at data setting time
    with pytest.raises(ValueError):
        parse_obj_as(TorchEmbedding[128], torch.zeros(256))

    # illegal shape at class creation time
    with pytest.raises(ValueError):
        parse_obj_as(TorchEmbedding[128, 128], torch.zeros(128, 128))


def test_parametrized_subclass():
    c1 = TorchTensor[128]
    c2 = TorchTensor[128]
    assert issubclass(c1, c2)
    assert issubclass(c1, TorchTensor)
    assert issubclass(c1, torch.Tensor)

    assert not issubclass(c1, TorchTensor[256])


def test_parametrized_instance():
    t = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert isinstance(t, TorchTensor[128])
    assert isinstance(t, TorchTensor)
    assert isinstance(t, torch.Tensor)

    assert not isinstance(t, TorchTensor[256])
    assert not isinstance(t, TorchTensor[2, 128])
    assert not isinstance(t, TorchTensor[2, 2, 64])


def test_parametrized_equality():
    t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    assert (t1 == t2).all()


def test_parametrized_operations():
    t1 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t2 = parse_obj_as(TorchTensor[128], torch.zeros(128))
    t_result = t1 + t2
    assert isinstance(t_result, torch.Tensor)
    assert isinstance(t_result, TorchTensor)
    assert isinstance(t_result, TorchTensor[128])
|
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header

Header 1 content
# Header 2
Header 2 content
"""
            )
        ]
    )
    assert len(splits) == 2
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/"}
    assert splits[0].text == "# Main Header\n\nHeader 1 content"
    assert splits[1].text == "# Header 2\nHeader 2 content"


def test_header_splits_with_forwardslash() -> None:
    markdown_parser = MarkdownNodeParser(
        header_path_separator="\u203A"
    )  # Unicode for "›", infrequently used char
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header

Header 1 content
## FAQ
FAQ content
### 24/7 Support
Support content
#### Contact info
Contact info content
"""
            )
        ]
    )
    assert len(splits) == 4
    assert splits[0].metadata == {"header_path": "›"}
    assert splits[1].metadata == {"header_path": "›Main Header›"}
    assert splits[2].metadata == {"header_path": "›Main Header›FAQ›"}
    assert splits[3].metadata == {"header_path": "›Main Header›FAQ›24/7 Support›"}
    assert splits[0].text == "# Main Header\n\nHeader 1 content"
    assert splits[1].text == "## FAQ\nFAQ content"
    assert splits[2].text == "### 24/7 Support\nSupport content"
    assert splits[3].text == "#### Contact info\nContact info content"


def test_header_splits_with_indented_code_blocks() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""Some text
# Header 1
## Header 2
### Header 3
```txt
Non indented block code
```
A list begins here:
* Element 1
```txt
# has some indented code, but it's not handled as that.
```
* Element 2
```txt
# also has some code, but unbalanced fences (different number of spaces). Everything after this is considered code block!
```
* Element 3
* Element 4
### Another Header 3
```txt
# has some wrongly indented fence, and leads to incorrect header detection.
```
## Another Header 2
"""
            )
        ]
    )
    assert len(splits) == 6
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[0].text == "Some text"
    assert splits[1].metadata == {"header_path": "/"}
    assert splits[1].text == "# Header 1"
    assert splits[2].metadata == {"header_path": "/Header 1/"}
    assert splits[2].text == "## Header 2"
    assert splits[3].metadata == {"header_path": "/Header 1/Header 2/"}
    assert splits[3].text.endswith("* Element 4")
    assert splits[4].metadata == {"header_path": "/Header 1/Header 2/"}
    assert splits[4].text.endswith("```")
    assert splits[5].metadata == {"header_path": "/Header 1/"}
    assert splits[5].text == "## Another Header 2"


def test_non_header_splits() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Header 1
#Not a header
Also # not a header
 # Still not a header
"""
            )
        ]
    )
    assert len(splits) == 1


def test_pre_header_content() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""
pre-header content
# Header 1
Content
## Sub-header
"""
            )
        ]
    )
    assert len(splits) == 3


def test_header_metadata() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header
Content
## Sub-header
Content
### Sub-sub header
Content
# New title
"""
            )
        ]
    )
    assert len(splits) == 4
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/Main Header/"}
    assert splits[2].metadata == {"header_path": "/Main Header/Sub-header/"}
    assert splits[3].metadata == {"header_path": "/"}


def test_header_metadata_with_level_jump() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header
Content
### Sub-header
Content
### Sub-sub header
Content
"""
            )
        ]
    )
    assert len(splits) == 3
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/Main Header/"}
    assert splits[2].metadata == {"header_path": "/Main Header/"}
|
from llama_index.core.node_parser.file.markdown import MarkdownNodeParser
from llama_index.core.schema import Document
def test_header_splits() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header

Header 1 content
# Header 2
Header 2 content
"""
            )
        ]
    )
    assert len(splits) == 2
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/"}
    assert splits[0].text == "# Main Header\n\nHeader 1 content"
    assert splits[1].text == "# Header 2\nHeader 2 content"


def test_header_splits_with_indented_code_blocks() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""Some text
# Header 1
## Header 2
### Header 3
```txt
Non indented block code
```
A list begins here:
* Element 1
```txt
# has some indented code, but it's not handled as that.
```
* Element 2
```txt
# also has some code, but unbalanced fences (different number of spaces). Everything after this is considered code block!
```
* Element 3
* Element 4
### Another Header 3
```txt
# has some wrongly indented fence, and leads to incorrect header detection.
```
## Another Header 2
"""
            )
        ]
    )
    assert len(splits) == 6
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[0].text == "Some text"
    assert splits[1].metadata == {"header_path": "/"}
    assert splits[1].text == "# Header 1"
    assert splits[2].metadata == {"header_path": "/Header 1/"}
    assert splits[2].text == "## Header 2"
    assert splits[3].metadata == {"header_path": "/Header 1/Header 2/"}
    assert splits[3].text.endswith("* Element 4")
    assert splits[4].metadata == {"header_path": "/Header 1/Header 2/"}
    assert splits[4].text.endswith("```")
    assert splits[5].metadata == {"header_path": "/Header 1/"}
    assert splits[5].text == "## Another Header 2"


def test_non_header_splits() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Header 1
#Not a header
Also # not a header
 # Still not a header
"""
            )
        ]
    )
    assert len(splits) == 1


def test_pre_header_content() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""
pre-header content
# Header 1
Content
## Sub-header
"""
            )
        ]
    )
    assert len(splits) == 3


def test_header_metadata() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header
Content
## Sub-header
Content
### Sub-sub header
Content
# New title
"""
            )
        ]
    )
    assert len(splits) == 4
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/Main Header/"}
    assert splits[2].metadata == {"header_path": "/Main Header/Sub-header/"}
    assert splits[3].metadata == {"header_path": "/"}


def test_header_metadata_with_level_jump() -> None:
    markdown_parser = MarkdownNodeParser()
    splits = markdown_parser.get_nodes_from_documents(
        [
            Document(
                text="""# Main Header
Content
### Sub-header
Content
### Sub-sub header
Content
"""
            )
        ]
    )
    assert len(splits) == 3
    assert splits[0].metadata == {"header_path": "/"}
    assert splits[1].metadata == {"header_path": "/Main Header/"}
    assert splits[2].metadata == {"header_path": "/Main Header/"}
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
    'NdArray',
    'AnyTensor',
    'AnyEmbedding',
    'NdArrayEmbedding',
    'ImageNdArray',
    'ImageTensor',
    'TensorFlowTensor',
]
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
    from docarray.typing.tensor.embedding import TorchEmbedding  # noqa: F401
    from docarray.typing.tensor.image import ImageTorchTensor  # noqa: F401
    from docarray.typing.tensor.torch_tensor import TorchTensor  # noqa: F401

    __all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
tf_available = is_tf_available()
if tf_available:
    from docarray.typing.tensor.embedding import TensorFlowEmbedding  # noqa: F401
    from docarray.typing.tensor.image import ImageTensorFlowTensor  # noqa: F401
    from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor  # noqa: F401

    __all__.extend(['TensorFlowEmbedding', 'TensorFlowTensor', 'ImageTensorFlowTensor'])
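Both versions follow the same guarded-export pattern: probe whether an optional backend is importable, and only then import and re-export its types. The probe can be sketched with `importlib.util.find_spec` (a simplified stand-in for docarray's `is_torch_available` / `is_tf_available` helpers):

```python
import importlib.util


def _is_available(package: str) -> bool:
    # True if `package` could be imported, without actually importing it.
    return importlib.util.find_spec(package) is not None


__all__ = []

if _is_available('torch'):
    from torch import Tensor  # noqa: F401

    __all__.append('Tensor')
```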
|
from docarray.typing.tensor.embedding import AnyEmbedding, NdArrayEmbedding
from docarray.typing.tensor.image import ImageNdArray, ImageTensor
from docarray.typing.tensor.ndarray import NdArray
from docarray.typing.tensor.tensor import AnyTensor
__all__ = [
    'NdArray',
    'AnyTensor',
    'AnyEmbedding',
    'NdArrayEmbedding',
    'ImageNdArray',
    'ImageTensor',
    'TensorFlowTensor',
]
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
    from docarray.typing.tensor.embedding import TorchEmbedding  # noqa: F401
    from docarray.typing.tensor.image import ImageTorchTensor  # noqa: F401
    from docarray.typing.tensor.torch_tensor import TorchTensor  # noqa: F401

    __all__.extend(['TorchEmbedding', 'TorchTensor', 'ImageTorchTensor'])
tf_available = is_tf_available()
if tf_available:
    from docarray.typing.tensor.embedding import TensorFlowEmbedding  # noqa: F401
    from docarray.typing.tensor.image import ImageTensorFlowTensor  # noqa: F401
    from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor  # noqa: F401

    __all__.extend(['TensorFlowEmbedding', 'TensorFlowTensor', 'ImageTensorFlowTensor'])
|
"""Utilities for working with HTML."""
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
    ".css",
    ".js",
    ".ico",
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".svg",
    ".csv",
    ".bz2",
    ".zip",
    ".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
    "(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
    "(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
    rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)


def find_all_links(
    raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
    """Extract all links from a raw HTML string.

    Args:
        raw_html: original HTML.
        pattern: Regex to use for extracting links from raw HTML.

    Returns:
        List[str]: all links
    """
    pattern = pattern or DEFAULT_LINK_REGEX
    return list(set(re.findall(pattern, raw_html)))


def extract_sub_links(
    raw_html: str,
    url: str,
    *,
    base_url: Optional[str] = None,
    pattern: Union[str, re.Pattern, None] = None,
    prevent_outside: bool = True,
    exclude_prefixes: Sequence[str] = (),
    continue_on_failure: bool = False,
) -> list[str]:
    """Extract all links from a raw HTML string and convert into absolute paths.

    Args:
        raw_html: original HTML.
        url: the url of the HTML.
        base_url: the base URL to check for outside links against.
        pattern: Regex to use for extracting links from raw HTML.
        prevent_outside: If True, ignore external links which are not children
            of the base URL.
        exclude_prefixes: Exclude any URLs that start with one of these prefixes.
        continue_on_failure: If True, continue if parsing a specific link raises an
            exception. Otherwise, raise the exception.

    Returns:
        List[str]: sub links.
    """
    base_url_to_use = base_url if base_url is not None else url
    parsed_base_url = urlparse(base_url_to_use)
    parsed_url = urlparse(url)
    all_links = find_all_links(raw_html, pattern=pattern)
    absolute_paths = set()
    for link in all_links:
        try:
            parsed_link = urlparse(link)
            # Some may be absolute links like https://to/path
            if parsed_link.scheme == "http" or parsed_link.scheme == "https":
                absolute_path = link
            # Some may have omitted the protocol like //to/path
            elif link.startswith("//"):
                absolute_path = f"{parsed_url.scheme}:{link}"
            else:
                absolute_path = urljoin(url, parsed_link.path)
                if parsed_link.query:
                    absolute_path += f"?{parsed_link.query}"
            absolute_paths.add(absolute_path)
        except Exception as e:
            if continue_on_failure:
                logger.warning(
                    "Unable to load link %s. Raised exception:\n\n%s", link, e
                )
                continue
            raise
    results = []
    for path in absolute_paths:
        if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
            continue
        if prevent_outside:
            parsed_path = urlparse(path)
            if parsed_base_url.netloc != parsed_path.netloc:
                continue
            # Will take care of verifying rest of path after netloc
            # if it's more specific
            if not path.startswith(base_url_to_use):
                continue
        results.append(path)
    return results
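A quick usage sketch of both helpers on an inline HTML string (the import path matches this module's location in langchain_core):

```python
from langchain_core.utils.html import extract_sub_links, find_all_links

html = (
    '<a href="/docs/intro">intro</a>'
    '<a href="https://example.com/docs/api">api</a>'
    '<a href="https://other.org/page">elsewhere</a>'
)

print(find_all_links(html))
# With prevent_outside=True (the default), the other.org link is filtered out:
print(extract_sub_links(html, "https://example.com/docs/"))
```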
|
import logging
import re
from collections.abc import Sequence
from typing import Optional, Union
from urllib.parse import urljoin, urlparse
logger = logging.getLogger(__name__)
PREFIXES_TO_IGNORE = ("javascript:", "mailto:", "#")
SUFFIXES_TO_IGNORE = (
    ".css",
    ".js",
    ".ico",
    ".png",
    ".jpg",
    ".jpeg",
    ".gif",
    ".svg",
    ".csv",
    ".bz2",
    ".zip",
    ".epub",
)
SUFFIXES_TO_IGNORE_REGEX = (
    "(?!" + "|".join([re.escape(s) + r"[\#'\"]" for s in SUFFIXES_TO_IGNORE]) + ")"
)
PREFIXES_TO_IGNORE_REGEX = (
    "(?!" + "|".join([re.escape(s) for s in PREFIXES_TO_IGNORE]) + ")"
)
DEFAULT_LINK_REGEX = (
    rf"href=[\"']{PREFIXES_TO_IGNORE_REGEX}((?:{SUFFIXES_TO_IGNORE_REGEX}.)*?)[\#'\"]"
)


def find_all_links(
    raw_html: str, *, pattern: Union[str, re.Pattern, None] = None
) -> list[str]:
    """Extract all links from a raw HTML string.

    Args:
        raw_html: original HTML.
        pattern: Regex to use for extracting links from raw HTML.

    Returns:
        List[str]: all links
    """
    pattern = pattern or DEFAULT_LINK_REGEX
    return list(set(re.findall(pattern, raw_html)))


def extract_sub_links(
    raw_html: str,
    url: str,
    *,
    base_url: Optional[str] = None,
    pattern: Union[str, re.Pattern, None] = None,
    prevent_outside: bool = True,
    exclude_prefixes: Sequence[str] = (),
    continue_on_failure: bool = False,
) -> list[str]:
    """Extract all links from a raw HTML string and convert into absolute paths.

    Args:
        raw_html: original HTML.
        url: the url of the HTML.
        base_url: the base URL to check for outside links against.
        pattern: Regex to use for extracting links from raw HTML.
        prevent_outside: If True, ignore external links which are not children
            of the base URL.
        exclude_prefixes: Exclude any URLs that start with one of these prefixes.
        continue_on_failure: If True, continue if parsing a specific link raises an
            exception. Otherwise, raise the exception.

    Returns:
        List[str]: sub links.
    """
    base_url_to_use = base_url if base_url is not None else url
    parsed_base_url = urlparse(base_url_to_use)
    parsed_url = urlparse(url)
    all_links = find_all_links(raw_html, pattern=pattern)
    absolute_paths = set()
    for link in all_links:
        try:
            parsed_link = urlparse(link)
            # Some may be absolute links like https://to/path
            if parsed_link.scheme == "http" or parsed_link.scheme == "https":
                absolute_path = link
            # Some may have omitted the protocol like //to/path
            elif link.startswith("//"):
                absolute_path = f"{parsed_url.scheme}:{link}"
            else:
                absolute_path = urljoin(url, parsed_link.path)
                if parsed_link.query:
                    absolute_path += f"?{parsed_link.query}"
            absolute_paths.add(absolute_path)
        except Exception as e:
            if continue_on_failure:
                logger.warning(
                    "Unable to load link %s. Raised exception:\n\n%s", link, e
                )
                continue
            raise
    results = []
    for path in absolute_paths:
        if any(path.startswith(exclude_prefix) for exclude_prefix in exclude_prefixes):
            continue
        if prevent_outside:
            parsed_path = urlparse(path)
            if parsed_base_url.netloc != parsed_path.netloc:
                continue
            # Will take care of verifying rest of path after netloc
            # if it's more specific
            if not path.startswith(base_url_to_use):
                continue
        results.append(path)
    return results
|
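A minimal usage sketch for the two helpers above. The markup and URLs are hypothetical illustration values, and the import path assumes these helpers live in `langchain_core.utils.html` as in recent LangChain releases.
# Usage sketch for find_all_links / extract_sub_links. The markup and URLs
# below are hypothetical; the import path assumes langchain_core.utils.html.
from langchain_core.utils.html import extract_sub_links, find_all_links

raw_html = (
    '<a href="/docs/intro">Intro</a>'
    '<a href="https://example.com/docs/api">API</a>'
    '<a href="https://other.org/page">External</a>'
)
print(find_all_links(raw_html))  # raw hrefs, deduplicated
# prevent_outside=True (the default) keeps only links under the base URL:
print(sorted(extract_sub_links(raw_html, "https://example.com/docs/")))
# ['https://example.com/docs/api', 'https://example.com/docs/intro']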
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import bounding_boxes
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.api.utils import legacy
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.variables import standardize_dtype
from keras.src.layers.preprocessing.feature_space import FeatureSpace
from keras.src.ops.operation_utils import get_source_inputs
from keras.src.saving.object_registration import CustomObjectScope
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import get_custom_objects
from keras.src.saving.object_registration import get_registered_name
from keras.src.saving.object_registration import get_registered_object
from keras.src.saving.object_registration import register_keras_serializable
from keras.src.saving.serialization_lib import deserialize_keras_object
from keras.src.saving.serialization_lib import serialize_keras_object
from keras.src.trainers.data_adapters.data_adapter_utils import (
pack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.data_adapter_utils import (
unpack_x_y_sample_weight,
)
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.trainers.data_adapters.py_dataset_adapter import (
PyDataset as Sequence,
)
from keras.src.utils.audio_dataset_utils import audio_dataset_from_directory
from keras.src.utils.config import Config
from keras.src.utils.dataset_utils import split_dataset
from keras.src.utils.file_utils import get_file
from keras.src.utils.image_dataset_utils import image_dataset_from_directory
from keras.src.utils.image_utils import array_to_img
from keras.src.utils.image_utils import img_to_array
from keras.src.utils.image_utils import load_img
from keras.src.utils.image_utils import save_img
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.model_visualization import model_to_dot
from keras.src.utils.model_visualization import plot_model
from keras.src.utils.numerical_utils import normalize
from keras.src.utils.numerical_utils import to_categorical
from keras.src.utils.progbar import Progbar
from keras.src.utils.rng_utils import set_random_seed
from keras.src.utils.sequence_utils import pad_sequences
from keras.src.utils.text_dataset_utils import text_dataset_from_directory
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array,
)
|
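These autogenerated manifests only re-export public symbols; a quick sketch of one of them, `to_categorical`, assuming Keras 3 is installed. The label values are illustrative.
# Minimal sketch of one re-exported utility from the manifest above
# (assumes Keras 3 is installed; label values are illustrative).
import numpy as np
from keras.utils import to_categorical

labels = np.array([0, 2, 1])
print(to_categorical(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]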
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .logging import *
from .registry import *
from .utils import *
|
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
from .config import *
from .data import *
from .dataset import *
from .fileio import *
from .hooks import *
from .registry import *
from .utils import *
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fixed-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
_base_ = [
'../_base_/models/retinanet_r50_fpn.py',
'../_base_/schedules/schedule_1x.py',
'../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
image_size = (896, 896)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth' # noqa
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
_delete_=True,
type='EfficientNet',
arch='b3',
drop_path_rate=0.2,
out_indices=(3, 4, 5),
frozen_stages=0,
norm_cfg=dict(
type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
norm_eval=False,
init_cfg=dict(
type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
neck=dict(
in_channels=[48, 136, 384],
start_level=0,
out_channels=256,
relu_before_extra_convs=True,
no_norm_on_lateral=True,
norm_cfg=norm_cfg),
bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
# training and testing settings
train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.8, 1.2),
keep_ratio=True),
dict(type='RandomCrop', crop_size=image_size),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='Resize', scale=image_size, keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=4, num_workers=4, dataset=dict(pipeline=train_pipeline))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# optimizer
optim_wrapper = dict(
optimizer=dict(lr=0.04),
paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
max_epochs = 12
param_scheduler = [
dict(type='LinearLR', start_factor=0.1, by_epoch=False, begin=0, end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs)
# cudnn_benchmark=True can accelerate fixed-size training
env_cfg = dict(cudnn_benchmark=True)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
|
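A minimal sketch of how such a config is materialized, assuming MMEngine is installed. The file path is a placeholder for wherever the config above (and its `_base_` files) are saved.
# Minimal sketch: load and inspect the merged config with MMEngine.
# The path is a placeholder for wherever the config above is saved.
from mmengine.config import Config

cfg = Config.fromfile('configs/efficientnet/retinanet_effb3_fpn_crop896_1x_coco.py')
print(cfg.model.backbone.type)         # 'EfficientNet'
print(cfg.optim_wrapper.optimizer.lr)  # 0.04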
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(
type='AutoAugment',
policies=[[
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
multiscale_mode='value',
keep_ratio=True)
],
[
dict(
type='Resize',
img_scale=[(400, 1333), (500, 1333), (600, 1333)],
multiscale_mode='value',
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='Resize',
img_scale=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
multiscale_mode='value',
override=True,
keep_ratio=True)
]]),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway
from jina.constants import __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
jina_pb2_grpc.add_JinaSingleDataRequestRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
import grpc
from grpc_health.v1 import health, health_pb2, health_pb2_grpc
from grpc_reflection.v1alpha import reflection
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.proto import jina_pb2, jina_pb2_grpc
class DummyResponseModel(BaseModel):
protocol: str
class MultiProtocolGateway(Gateway):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.http_port = self.ports[0]
self.grpc_port = self.ports[1]
self.health_servicer = health.HealthServicer(experimental_non_blocking=True)
async def _setup_http_server(self):
from fastapi import FastAPI
app = FastAPI(
title='HTTP Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {'protocol': 'http'}
self.http_server = Server(
Config(app, host=__default_host__, port=self.http_port)
)
async def _setup_grpc_server(self):
self.grpc_server = grpc.aio.server()
jina_pb2_grpc.add_JinaRPCServicer_to_server(
self.streamer._streamer, self.grpc_server
)
service_names = (
jina_pb2.DESCRIPTOR.services_by_name['JinaRPC'].full_name,
reflection.SERVICE_NAME,
)
# Mark all services as healthy.
health_pb2_grpc.add_HealthServicer_to_server(
self.health_servicer, self.grpc_server
)
for service in service_names:
self.health_servicer.set(service, health_pb2.HealthCheckResponse.SERVING)
reflection.enable_server_reflection(service_names, self.grpc_server)
self.grpc_server.add_insecure_port(f'{__default_host__}:{self.grpc_port}')
await self.grpc_server.start()
async def setup_server(self):
await self._setup_http_server()
await self._setup_grpc_server()
async def run_server(self):
await self.http_server.serve()
await self.grpc_server.wait_for_termination()
async def shutdown(self):
self.http_server.should_exit = True
await self.grpc_server.stop(0)
await self.http_server.shutdown()
self.health_servicer.enter_graceful_shutdown()
@property
def _should_exit(self) -> bool:
return self.http_server.should_exit
|
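A minimal sketch of serving the gateway above, assuming the Jina 3.x `Flow.config_gateway` API; the port numbers are placeholders.
# Minimal sketch, assuming Jina 3.x: expose MultiProtocolGateway over one
# HTTP and one gRPC port. Port numbers are placeholders.
from jina import Flow

flow = Flow().config_gateway(
    uses=MultiProtocolGateway,
    port=[12345, 12346],
    protocol=['http', 'grpc'],
)
with flow:
    flow.block()  # serve until interrupted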
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.1'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '2.24.0'
short_version = __version__
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
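A quick sketch of the parser's behavior on a plain and a release-candidate version string (both values are illustrative).
# Quick sketch of parse_version_info on illustrative version strings.
print(parse_version_info('2.24.1'))     # (2, 24, 1)
print(parse_version_info('2.25.0rc1'))  # (2, 25, 0, 'rc1')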
from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryDirectory(ignore_cleanup_errors=True), this also works on Python 3.9.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["ignore_cleanup_errors"] = True
try:
super().__init__(*args, **kwargs)
except TypeError:
del kwargs["ignore_cleanup_errors"]
super().__init__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
try:
super().__exit__(*args, **kwargs)
except NotADirectoryError:
pass
|
from __future__ import annotations
import tempfile
class SafeTemporaryDirectory(tempfile.TemporaryDirectory):
"""
The GitHub Actions CI on Windows sometimes raises a NotADirectoryError when cleaning up the temporary directory.
This class is a workaround to avoid the error.
Unlike tempfile.TemporaryDirectory(ignore_cleanup_errors=True), this also works on Python 3.8 and 3.9.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["ignore_cleanup_errors"] = True
try:
super().__init__(*args, **kwargs)
except TypeError:
del kwargs["ignore_cleanup_errors"]
super().__init__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
try:
super().__exit__(*args, **kwargs)
except NotADirectoryError:
pass
|
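A minimal usage sketch: the class drops in wherever `tempfile.TemporaryDirectory` is used.
# Minimal sketch: SafeTemporaryDirectory as a drop-in TemporaryDirectory.
import os

with SafeTemporaryDirectory() as tmp_dir:
    with open(os.path.join(tmp_dir, "scratch.txt"), "w") as f:
        f.write("temporary data")
# A NotADirectoryError raised during cleanup (e.g. on Windows CI) is swallowed.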
import subprocess
import pytest
from flair_text import FlairTextEncoder
from jina import Document, DocumentArray, Flow
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
import subprocess
import pytest
from jina import Document, DocumentArray, Flow
from ...flair_text import FlairTextEncoder
_EMBEDDING_DIM = 100
@pytest.mark.parametrize('request_size', [1, 10, 50, 100])
def test_integration(request_size: int):
docs = DocumentArray(
[Document(text='just some random text here') for _ in range(50)]
)
with Flow(return_results=True).add(uses=FlairTextEncoder) as flow:
resp = flow.post(
on='/index',
inputs=docs,
request_size=request_size,
return_results=True,
)
assert sum(len(resp_batch.docs) for resp_batch in resp) == 50
for r in resp:
for doc in r.docs:
assert doc.embedding.shape == (_EMBEDDING_DIM,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image}',
],
timeout=30,
check=True,
)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from sentence_transformers import SentenceTransformer, util, models
from PIL import Image
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPProcessor, CLIPModel
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.preprocessing.sequence import TimeseriesGenerator
from keras.src.legacy.preprocessing.sequence import make_sampling_table
from keras.src.legacy.preprocessing.sequence import skipgrams
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences
|
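A quick sketch of the one symbol this module re-exports, assuming Keras 3 is installed; the sequences are illustrative.
# Quick sketch of pad_sequences (Keras 3 assumed; values illustrative).
from keras.utils import pad_sequences

print(pad_sequences([[1, 2], [3, 4, 5]], maxlen=4, padding="post"))
# [[1 2 0 0]
#  [3 4 5 0]]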
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
        version (str, optional): The version in which the object will be removed.
        remove (bool): If enabled, append the future-removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
message = "This function has been deprecated. "
if remove:
message += f'It will be removed from {"future" if version is None else version} release. '
wrapped.__doc__ = f"""DEPRECATED: {func.__doc__}
.. warning::
{message}
{direction}
"""
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
import importlib.util
import os
import warnings
from functools import wraps
from typing import Optional
def eval_env(var, default):
"""Check if environment varable has True-y value"""
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
# fmt: off
raise RuntimeError(
f"Unexpected environment variable value `{var}={val}`. "
f"Expected one of {trues + falses}")
# fmt: on
return False
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f"module: {missing[0]}" if len(missing) == 1 else f"modules: {missing}"
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} requires {req}")
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None, remove: bool = False):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
        version (str, optional): The version in which the object will be removed.
        remove (bool): If enabled, append the future-removal message.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = f"{func.__module__}.{func.__name__} has been deprecated. {direction}"
if remove:
message += f' It will be removed from {"future" if version is None else version} release. '
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def fail_with_message(message):
"""Generate decorator to give users message about missing TorchAudio extension."""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f"{func.__module__}.{func.__name__} {message}")
return wrapped
return decorator
def no_op(func):
"""Op-op decorator. Used in place of fail_with_message when a functionality that requires extension works fine."""
return func
|
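A minimal sketch of the decorators above applied to toy functions; `some_missing_module` is a placeholder for an uninstalled dependency.
# Minimal sketch of the decorators above on toy functions.
# "some_missing_module" is a placeholder for an uninstalled dependency.
@deprecated(direction="Use new_resample instead.", version="2.1", remove=True)
def old_resample(x):
    """Resample the input."""
    return x

old_resample(1)  # emits a UserWarning carrying the migration message

@requires_module("some_missing_module")
def needs_extra(x):
    return x

# needs_extra(1) would raise RuntimeError: ... requires module: some_missing_module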
"""DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_vector: Union[List, np.ndarray],
data_vectors: np.ndarray,
distance_metric: str = "l2",
limit: Optional[int] = 4,
) -> List:
"""
Naive search for nearest neighbors
args:
query_vector: Union[List, np.ndarray]
data_vectors: np.ndarray
limit (int): number of nearest neighbors
    distance_metric: distance function; 'l2' for Euclidean, 'l1' for Manhattan,
        'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors.
"""
# Calculate the distance between the query_vector and all data_vectors
if isinstance(query_vector, list):
query_vector = np.array(query_vector)
query_vector = query_vector.reshape(1, -1)
distances = distance_metric_map[distance_metric](query_vector, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:limit]
if distance_metric in ["cos"]
else nearest_indices[:limit]
)
return nearest_indices.tolist()
class DeepLakeReader(BaseReader):
"""
DeepLake reader.
Retrieve documents from existing DeepLake datasets.
Args:
        token (Optional[str]): Token used to authenticate with Deep Lake.
"""
def __init__(
self,
token: Optional[str] = None,
):
"""Initializing the deepLake reader."""
import_err_msg = (
"`deeplake` package not found, please run `pip install deeplake`"
)
try:
import deeplake # noqa
except ImportError:
raise ImportError(import_err_msg)
self.token = token
def load_data(
self,
query_vector: List[float],
dataset_path: str,
limit: int = 4,
distance_metric: str = "l2",
) -> List[Document]:
"""
Load data from DeepLake.
Args:
        dataset_path (str): Path to the DeepLake dataset.
query_vector (List[float]): Query vector.
limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
import deeplake
from deeplake.util.exceptions import TensorDoesNotExistError
dataset = deeplake.load(dataset_path, token=self.token)
try:
embeddings = dataset.embedding.numpy(fetch_chunks=True)
except Exception:
raise TensorDoesNotExistError("embedding")
indices = vector_search(
query_vector, embeddings, distance_metric=distance_metric, limit=limit
)
documents = []
for idx in indices:
document = Document(
text=str(dataset[idx].text.numpy().tolist()[0]),
id_=dataset[idx].ids.numpy().tolist()[0],
)
documents.append(document)
return documents
|
"""DeepLake reader."""
from typing import List, Optional, Union
import numpy as np
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
distance_metric_map = {
"l2": lambda a, b: np.linalg.norm(a - b, axis=1, ord=2),
"l1": lambda a, b: np.linalg.norm(a - b, axis=1, ord=1),
"max": lambda a, b: np.linalg.norm(a - b, axis=1, ord=np.inf),
"cos": lambda a, b: np.dot(a, b.T)
/ (np.linalg.norm(a) * np.linalg.norm(b, axis=1)),
"dot": lambda a, b: np.dot(a, b.T),
}
def vector_search(
query_vector: Union[List, np.ndarray],
data_vectors: np.ndarray,
distance_metric: str = "l2",
limit: Optional[int] = 4,
) -> List:
"""Naive search for nearest neighbors
args:
query_vector: Union[List, np.ndarray]
data_vectors: np.ndarray
limit (int): number of nearest neighbors
    distance_metric: distance function; 'l2' for Euclidean, 'l1' for Manhattan,
        'max' for L-infinity distance, 'cos' for cosine similarity, 'dot' for dot product
returns:
nearest_indices: List, indices of nearest neighbors.
"""
# Calculate the distance between the query_vector and all data_vectors
if isinstance(query_vector, list):
query_vector = np.array(query_vector)
query_vector = query_vector.reshape(1, -1)
distances = distance_metric_map[distance_metric](query_vector, data_vectors)
nearest_indices = np.argsort(distances)
nearest_indices = (
nearest_indices[::-1][:limit]
if distance_metric in ["cos"]
else nearest_indices[:limit]
)
return nearest_indices.tolist()
class DeepLakeReader(BaseReader):
"""DeepLake reader.
Retrieve documents from existing DeepLake datasets.
Args:
        token (Optional[str]): Token used to authenticate with Deep Lake.
"""
def __init__(
self,
token: Optional[str] = None,
):
"""Initializing the deepLake reader."""
import_err_msg = (
"`deeplake` package not found, please run `pip install deeplake`"
)
try:
import deeplake # noqa
except ImportError:
raise ImportError(import_err_msg)
self.token = token
def load_data(
self,
query_vector: List[float],
dataset_path: str,
limit: int = 4,
distance_metric: str = "l2",
) -> List[Document]:
"""Load data from DeepLake.
Args:
        dataset_path (str): Path to the DeepLake dataset.
query_vector (List[float]): Query vector.
limit (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
import deeplake
from deeplake.util.exceptions import TensorDoesNotExistError
dataset = deeplake.load(dataset_path, token=self.token)
try:
embeddings = dataset.embedding.numpy(fetch_chunks=True)
except Exception:
raise TensorDoesNotExistError("embedding")
indices = vector_search(
query_vector, embeddings, distance_metric=distance_metric, limit=limit
)
documents = []
for idx in indices:
document = Document(
text=str(dataset[idx].text.numpy().tolist()[0]),
id_=dataset[idx].ids.numpy().tolist()[0],
)
documents.append(document)
return documents
|
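A minimal sketch of the naive `vector_search` above on random data, assuming the helper is in scope; sizes and the seed are illustrative.
# Minimal sketch of the naive vector_search above on random data
# (assumes the helper is in scope; sizes and seed are illustrative).
import numpy as np

rng = np.random.default_rng(0)
data_vectors = rng.standard_normal((100, 8))
query = data_vectors[0]  # row 0 should be its own nearest neighbor

print(vector_search(query, data_vectors, distance_metric="l2", limit=3))
# [0, ...] -- index 0 is returned first (distance zero to itself)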
"""
This script contains an example how to perform semantic search with Qdrant.
You need Qdrant up and running locally:
https://qdrant.tech/documentation/quickstart/
Further, you need the Python Qdrant Client installed: https://python-client.qdrant.tech/, e.g.:
```
pip install qdrant-client
```
This script was created for `qdrant-client` v1.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train", trust_remote_code=True)
corpus = dataset["answer"]
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
sparse_model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=16, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.search_engines import semantic_search_qdrant
# 1. Load the quora corpus with questions
dataset = load_dataset("quora", split="train", trust_remote_code=True).map(
lambda batch: {"text": [text for sample in batch["questions"] for text in sample["text"]]},
batched=True,
remove_columns=["questions", "is_duplicate"],
)
max_corpus_size = 100_000
corpus = dataset["text"][:max_corpus_size]
# 2. Come up with some queries
queries = [
"How do I become a good programmer?",
"How do I become a good data scientist?",
]
# 3. Load the model
sparse_model = SparseEncoder("sparse-embedding/splade_example")
# 5. Encode the corpus
corpus_embeddings = sparse_model.encode(corpus, convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True)
# Initially, we don't have a qdrant index yet
corpus_index = None
while True:
# 6. Encode the queries using the full precision
start_time = time.time()
query_embeddings = sparse_model.encode(queries, convert_to_sparse_tensor=True)
print(f"Encoding time: {time.time() - start_time:.6f} seconds")
# 7. Perform semantic search using qdrant
results, search_time, corpus_index = semantic_search_qdrant(
query_embeddings,
corpus_index=corpus_index,
corpus_embeddings=corpus_embeddings if corpus_index is None else None,
top_k=10,
output_index=True,
)
# 8. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 10. Prompt for more queries
queries = [input("Please enter a question: ")]
|
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import abcdefg # no install and unlist
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import ngt # list but no install
fake_tags = ['ngt', 'index', 'py37']
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt.abc.edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
from ngt.abc import edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
import abcdefg
assert not ie._tags
def test_no_suppress_other_exception():
with pytest.raises(Exception):
with ImportExtensions(required=False, logger=default_logger):
raise Exception
with pytest.raises(Exception):
with ImportExtensions(required=True, logger=default_logger):
raise Exception
def test_path_importer(tmpdir):
    tmpmodule = 'package.py'
with open(tmpdir / tmpmodule, 'w') as f:
f.write("raise ImportError")
from jina.importer import PathImporter
with pytest.raises(ImportError):
PathImporter.add_modules(tmpdir / tmpmodule)
with pytest.raises(FileNotFoundError):
PathImporter.add_modules('some_package.py')
|
import pytest
from jina.importer import ImportExtensions
from jina.logging.predefined import default_logger
def test_bad_import():
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import abcdefg # no install and unlist
with pytest.raises(ModuleNotFoundError):
with ImportExtensions(required=True, logger=default_logger):
import ngt # list but no install
fake_tags = ['ngt', 'index', 'py37']
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
import ngt.abc.edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
ie._tags = fake_tags
from ngt.abc import edf
assert ie._tags == fake_tags
with ImportExtensions(required=False, logger=default_logger) as ie:
import abcdefg
assert not ie._tags
def test_no_suppress_other_exception():
with pytest.raises(Exception):
with ImportExtensions(required=False, logger=default_logger):
raise Exception
with pytest.raises(Exception):
with ImportExtensions(required=True, logger=default_logger):
raise Exception
def test_path_importer(tmpdir):
    tmpmodule = 'package.py'
with open(tmpdir / tmpmodule, 'w') as f:
f.write("raise ImportError")
from jina.importer import PathImporter
with pytest.raises(ImportError):
PathImporter.add_modules(tmpdir / tmpmodule)
with pytest.raises(FileNotFoundError):
PathImporter.add_modules('some_package.py')
|
import ipaddress
import socket
from typing import Callable
from urllib.parse import urlparse
import requests as req
from backend.util.settings import Config
# List of IP networks to block
BLOCKED_IP_NETWORKS = [
# --8<-- [start:BLOCKED_IP_NETWORKS]
ipaddress.ip_network("0.0.0.0/8"), # "This" Network
ipaddress.ip_network("10.0.0.0/8"), # Private-Use
ipaddress.ip_network("127.0.0.0/8"), # Loopback
ipaddress.ip_network("169.254.0.0/16"), # Link Local
ipaddress.ip_network("172.16.0.0/12"), # Private-Use
ipaddress.ip_network("192.168.0.0/16"), # Private-Use
ipaddress.ip_network("224.0.0.0/4"), # Multicast
ipaddress.ip_network("240.0.0.0/4"), # Reserved for Future Use
# --8<-- [end:BLOCKED_IP_NETWORKS]
]
def is_ip_blocked(ip: str) -> bool:
"""
Checks if the IP address is in a blocked network.
"""
ip_addr = ipaddress.ip_address(ip)
return any(ip_addr in network for network in BLOCKED_IP_NETWORKS)
def validate_url(url: str, trusted_origins: list[str]) -> str:
"""
Validates the URL to prevent SSRF attacks by ensuring it does not point to a private
or untrusted IP address, unless whitelisted.
"""
url = url.strip().strip("/")
if not url.startswith(("http://", "https://")):
url = "http://" + url
parsed_url = urlparse(url)
hostname = parsed_url.hostname
if not hostname:
raise ValueError(f"Invalid URL: Unable to determine hostname from {url}")
if any(hostname == origin for origin in trusted_origins):
return url
# Resolve all IP addresses for the hostname
ip_addresses = {result[4][0] for result in socket.getaddrinfo(hostname, None)}
if not ip_addresses:
raise ValueError(f"Unable to resolve IP address for {hostname}")
    # Reject the URL if any resolved IP falls in a blocked network
for ip in ip_addresses:
if is_ip_blocked(ip):
raise ValueError(
f"Access to private IP address at {hostname}: {ip} is not allowed."
)
return url
class Requests:
"""
A wrapper around the requests library that validates URLs before making requests.
"""
def __init__(
self,
trusted_origins: list[str] | None = None,
raise_for_status: bool = True,
extra_url_validator: Callable[[str], str] | None = None,
extra_headers: dict[str, str] | None = None,
):
self.trusted_origins = []
for url in trusted_origins or []:
hostname = urlparse(url).hostname
if not hostname:
raise ValueError(f"Invalid URL: Unable to determine hostname of {url}")
self.trusted_origins.append(hostname)
self.raise_for_status = raise_for_status
self.extra_url_validator = extra_url_validator
self.extra_headers = extra_headers
def request(
self, method, url, headers=None, allow_redirects=False, *args, **kwargs
) -> req.Response:
if self.extra_headers is not None:
headers = {**(headers or {}), **self.extra_headers}
url = validate_url(url, self.trusted_origins)
if self.extra_url_validator is not None:
url = self.extra_url_validator(url)
response = req.request(
method,
url,
headers=headers,
allow_redirects=allow_redirects,
*args,
**kwargs,
)
if self.raise_for_status:
response.raise_for_status()
return response
def get(self, url, *args, **kwargs) -> req.Response:
return self.request("GET", url, *args, **kwargs)
def post(self, url, *args, **kwargs) -> req.Response:
return self.request("POST", url, *args, **kwargs)
def put(self, url, *args, **kwargs) -> req.Response:
return self.request("PUT", url, *args, **kwargs)
def delete(self, url, *args, **kwargs) -> req.Response:
return self.request("DELETE", url, *args, **kwargs)
def head(self, url, *args, **kwargs) -> req.Response:
return self.request("HEAD", url, *args, **kwargs)
def options(self, url, *args, **kwargs) -> req.Response:
return self.request("OPTIONS", url, *args, **kwargs)
def patch(self, url, *args, **kwargs) -> req.Response:
return self.request("PATCH", url, *args, **kwargs)
requests = Requests(trusted_origins=Config().trust_endpoints_for_requests)
|
import ipaddress
import socket
from typing import Callable
from urllib.parse import urlparse
import requests as req
from backend.util.settings import Config
# List of IP networks to block
BLOCKED_IP_NETWORKS = [
ipaddress.ip_network("0.0.0.0/8"), # "This" Network
ipaddress.ip_network("10.0.0.0/8"), # Private-Use
ipaddress.ip_network("127.0.0.0/8"), # Loopback
ipaddress.ip_network("169.254.0.0/16"), # Link Local
ipaddress.ip_network("172.16.0.0/12"), # Private-Use
ipaddress.ip_network("192.168.0.0/16"), # Private-Use
ipaddress.ip_network("224.0.0.0/4"), # Multicast
ipaddress.ip_network("240.0.0.0/4"), # Reserved for Future Use
]
def is_ip_blocked(ip: str) -> bool:
"""
Checks if the IP address is in a blocked network.
"""
ip_addr = ipaddress.ip_address(ip)
return any(ip_addr in network for network in BLOCKED_IP_NETWORKS)
def validate_url(url: str, trusted_origins: list[str]) -> str:
"""
Validates the URL to prevent SSRF attacks by ensuring it does not point to a private
or untrusted IP address, unless whitelisted.
"""
url = url.strip().strip("/")
if not url.startswith(("http://", "https://")):
url = "http://" + url
parsed_url = urlparse(url)
hostname = parsed_url.hostname
if not hostname:
raise ValueError(f"Invalid URL: Unable to determine hostname from {url}")
if any(hostname == origin for origin in trusted_origins):
return url
# Resolve all IP addresses for the hostname
ip_addresses = {result[4][0] for result in socket.getaddrinfo(hostname, None)}
if not ip_addresses:
raise ValueError(f"Unable to resolve IP address for {hostname}")
    # Reject the URL if any resolved IP falls in a blocked network
for ip in ip_addresses:
if is_ip_blocked(ip):
raise ValueError(
f"Access to private IP address at {hostname}: {ip} is not allowed."
)
return url
class Requests:
"""
A wrapper around the requests library that validates URLs before making requests.
"""
def __init__(
self,
trusted_origins: list[str] | None = None,
raise_for_status: bool = True,
extra_url_validator: Callable[[str], str] | None = None,
extra_headers: dict[str, str] | None = None,
):
self.trusted_origins = []
for url in trusted_origins or []:
hostname = urlparse(url).hostname
if not hostname:
raise ValueError(f"Invalid URL: Unable to determine hostname of {url}")
self.trusted_origins.append(hostname)
self.raise_for_status = raise_for_status
self.extra_url_validator = extra_url_validator
self.extra_headers = extra_headers
def request(
self, method, url, headers=None, allow_redirects=False, *args, **kwargs
) -> req.Response:
if self.extra_headers is not None:
headers = {**(headers or {}), **self.extra_headers}
url = validate_url(url, self.trusted_origins)
if self.extra_url_validator is not None:
url = self.extra_url_validator(url)
response = req.request(
method,
url,
headers=headers,
allow_redirects=allow_redirects,
*args,
**kwargs,
)
if self.raise_for_status:
response.raise_for_status()
return response
def get(self, url, *args, **kwargs) -> req.Response:
return self.request("GET", url, *args, **kwargs)
def post(self, url, *args, **kwargs) -> req.Response:
return self.request("POST", url, *args, **kwargs)
def put(self, url, *args, **kwargs) -> req.Response:
return self.request("PUT", url, *args, **kwargs)
def delete(self, url, *args, **kwargs) -> req.Response:
return self.request("DELETE", url, *args, **kwargs)
def head(self, url, *args, **kwargs) -> req.Response:
return self.request("HEAD", url, *args, **kwargs)
def options(self, url, *args, **kwargs) -> req.Response:
return self.request("OPTIONS", url, *args, **kwargs)
def patch(self, url, *args, **kwargs) -> req.Response:
return self.request("PATCH", url, *args, **kwargs)
requests = Requests(trusted_origins=Config().trust_endpoints_for_requests)
|
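A minimal sketch of the SSRF guard above, assuming `validate_url` is in scope; it requires live DNS resolution, and the URLs are illustrative.
# Minimal sketch of the SSRF guard above (requires live DNS resolution).
# "localhost" resolves to 127.0.0.1, inside the blocked loopback range.
try:
    validate_url("localhost:8080/admin", trusted_origins=[])
except ValueError as err:
    print(err)  # Access to private IP address at localhost: 127.0.0.1 ...

# A hostname resolving only to public addresses passes through unchanged:
print(validate_url("https://example.com/api", trusted_origins=[]))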
"""PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""
PDF Table Reader. Reads table from PDF.
Args:
row_separator (str): Row separator used to join rows of a DataFrame.
col_separator (str): Col separator used to join columns of a DataFrame.
"""
def __init__(
self,
*args: Any,
row_separator: str = "\n",
col_separator: str = ", ",
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self._row_separator = row_separator
self._col_separator = col_separator
def load_data(
self, file: Path, pages: str = "1", extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Load data and extract table from PDF file.
Args:
file (Path): Path for the PDF file.
pages (str): Pages to read tables from.
extra_info (Optional[Dict]): Extra information.
Returns:
List[Document]: List of documents.
"""
import camelot
results = []
tables = camelot.read_pdf(filepath=str(file), pages=pages)
for table in tables:
document = self._dataframe_to_document(df=table.df, extra_info=extra_info)
results.append(document)
return results
def _dataframe_to_document(
self, df: pd.DataFrame, extra_info: Optional[Dict] = None
) -> Document:
df_list = df.apply(
lambda row: (self._col_separator).join(row.astype(str).tolist()), axis=1
).tolist()
return Document(
text=self._row_separator.join(df_list), extra_info=extra_info or {}
)
|
"""PDF Table reader."""
from pathlib import Path
from typing import Any, Dict, List, Optional
import pandas as pd
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class PDFTableReader(BaseReader):
"""PDF Table Reader. Reads table from PDF.
Args:
row_separator (str): Row separator used to join rows of a DataFrame.
col_separator (str): Col separator used to join columns of a DataFrame.
"""
def __init__(
self,
*args: Any,
row_separator: str = "\n",
col_separator: str = ", ",
**kwargs: Any
) -> None:
super().__init__(*args, **kwargs)
self._row_separator = row_separator
self._col_separator = col_separator
def load_data(
self, file: Path, pages: str = "1", extra_info: Optional[Dict] = None
) -> List[Document]:
"""Load data and extract table from PDF file.
Args:
file (Path): Path for the PDF file.
pages (str): Pages to read tables from.
extra_info (Optional[Dict]): Extra information.
Returns:
List[Document]: List of documents.
"""
import camelot
results = []
tables = camelot.read_pdf(filepath=str(file), pages=pages)
for table in tables:
document = self._dataframe_to_document(df=table.df, extra_info=extra_info)
results.append(document)
return results
def _dataframe_to_document(
self, df: pd.DataFrame, extra_info: Optional[Dict] = None
) -> Document:
df_list = df.apply(
lambda row: (self._col_separator).join(row.astype(str).tolist()), axis=1
).tolist()
return Document(
text=self._row_separator.join(df_list), extra_info=extra_info or {}
)
|
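A minimal usage sketch, assuming a local "report.pdf" containing tables and an installed camelot (e.g. `pip install camelot-py`); both the file and page range are placeholders.
# Minimal sketch: extract tables from a hypothetical local PDF.
from pathlib import Path

reader = PDFTableReader(row_separator="\n", col_separator=" | ")
documents = reader.load_data(file=Path("report.pdf"), pages="1-2")
for doc in documents:
    print(doc.text[:200])  # first rows of each extracted table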
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logging
import re
from pathlib import Path
from typing import Dict, Tuple, Union
import torch
import torchaudio
_LG = logging.getLogger(__name__)
def create_tsv(
root_dir: Union[str, Path],
out_dir: Union[str, Path],
dataset: str = "librispeech",
valid_percent: float = 0.01,
seed: int = 0,
extension: str = "flac",
) -> None:
"""Create file lists for training and validation.
Args:
root_dir (str or Path): The directory of the dataset.
out_dir (str or Path): The directory to store the file lists.
dataset (str, optional): The dataset to use. Options:
[``librispeech``, ``libri-light``]. (Default: ``librispeech``)
valid_percent (float, optional): The percentage of data for validation. (Default: 0.01)
seed (int): The seed for randomly selecting the validation files.
extension (str, optional): The extension of audio files. (Default: ``flac``)
Returns:
None
"""
    assert 0 <= valid_percent <= 1.0
torch.manual_seed(seed)
root_dir = Path(root_dir)
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir()
valid_f = open(out_dir / f"{dataset}_valid.tsv", "w") if valid_percent > 0 else None
search_pattern = ".*train.*"
with open(out_dir / f"{dataset}_train.tsv", "w") as train_f:
print(root_dir, file=train_f)
if valid_f is not None:
print(root_dir, file=valid_f)
for fname in root_dir.glob(f"**/*.{extension}"):
if re.match(search_pattern, str(fname)):
frames = torchaudio.info(fname).num_frames
dest = train_f if torch.rand(1) > valid_percent else valid_f
print(f"{fname.relative_to(root_dir)}\t{frames}", file=dest)
if valid_f is not None:
valid_f.close()
_LG.info("Finished creating the file lists successfully")
def _get_feat_lens_paths(feat_dir: Path, split: str, rank: int, num_rank: int) -> Tuple[Path, Path]:
r"""Get the feature and lengths paths based on feature directory,
data split, rank, and number of ranks.
Args:
feat_dir (Path): The directory that stores the feature and lengths tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Path, Path)
Path: The file path of the feature tensor for the current rank.
Path: The file path of the lengths tensor for the current rank.
"""
feat_path = feat_dir / f"{split}_{rank}_{num_rank}.pt"
len_path = feat_dir / f"len_{split}_{rank}_{num_rank}.pt"
return feat_path, len_path
def _get_model_path(km_dir: Path) -> Path:
r"""Get the file path of the KMeans clustering model
Args:
km_dir (Path): The directory to store the KMeans clustering model.
Returns:
Path: The file path of the model.
"""
return km_dir / "model.pt"
def _get_id2label() -> Dict:
"""Get the dictionary that maps indices of ASR model's last layer dimension to the corresponding labels."""
bundle = torchaudio.pipelines.HUBERT_ASR_LARGE
labels = bundle.get_labels()
return {i: char.lower() for i, char in enumerate(labels)}
def _get_label2id() -> Dict:
"""Get the dictionary that maps the labels to the corresponding indices in ASR model's last dimension."""
bundle = torchaudio.pipelines.HUBERT_ASR_LARGE
labels = bundle.get_labels()
return {char: i for i, char in enumerate(labels)}
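# --- Usage sketch (illustrative) ---
# Assuming LibriSpeech audio is unpacked under ./LibriSpeech, this writes
# librispeech_train.tsv and librispeech_valid.tsv into ./tsv, holding out
# roughly 1% of the files for validation. Paths are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    create_tsv(
        root_dir="./LibriSpeech",
        out_dir="./tsv",
        dataset="librispeech",
        valid_percent=0.01,
        extension="flac",
    )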
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logging
import re
from pathlib import Path
from typing import Tuple, Union
import torch
import torchaudio
_LG = logging.getLogger(__name__)
def create_tsv(
root_dir: Union[str, Path],
out_dir: Union[str, Path],
dataset: str = "librispeech",
valid_percent: float = 0.01,
seed: int = 0,
extension: str = "flac",
) -> None:
"""Create file lists for training and validation.
Args:
root_dir (str or Path): The directory of the dataset.
out_dir (str or Path): The directory to store the file lists.
dataset (str, optional): The dataset to use. Options:
[``librispeech``, ``libri-light``]. (Default: ``librispeech``)
valid_percent (float, optional): The percentage of data for validation. (Default: 0.01)
seed (int): The seed for randomly selecting the validation files.
extension (str, optional): The extension of audio files. (Default: ``flac``)
Returns:
None
"""
    assert 0 <= valid_percent <= 1.0
torch.manual_seed(seed)
root_dir = Path(root_dir)
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir()
valid_f = open(out_dir / f"{dataset}_valid.tsv", "w") if valid_percent > 0 else None
search_pattern = ".*train.*"
with open(out_dir / f"{dataset}_train.tsv", "w") as train_f:
print(root_dir, file=train_f)
if valid_f is not None:
print(root_dir, file=valid_f)
for fname in root_dir.glob(f"**/*.{extension}"):
if re.match(search_pattern, str(fname)):
frames = torchaudio.info(fname).num_frames
dest = train_f if torch.rand(1) > valid_percent else valid_f
print(f"{fname.relative_to(root_dir)}\t{frames}", file=dest)
if valid_f is not None:
valid_f.close()
_LG.info("Finished creating the file lists successfully")
def _get_feat_lens_paths(feat_dir: Path, split: str, rank: int, num_rank: int) -> Tuple[Path, Path]:
r"""Get the feature and lengths paths based on feature directory,
data split, rank, and number of ranks.
Args:
feat_dir (Path): The directory that stores the feature and lengths tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Path, Path)
Path: The file path of the feature tensor for the current rank.
Path: The file path of the lengths tensor for the current rank.
"""
feat_path = feat_dir / f"{split}_{rank}_{num_rank}.pt"
len_path = feat_dir / f"len_{split}_{rank}_{num_rank}.pt"
return feat_path, len_path
def _get_model_path(km_dir: Path) -> Path:
r"""Get the file path of the KMeans clustering model
Args:
km_dir (Path): The directory to store the KMeans clustering model.
Returns:
Path: The file path of the model.
"""
return km_dir / "model.pt"
|
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to be sent.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365SendMessage(O365BaseTool):
"""Send an email in Office 365."""
name: str = "send_email"
description: str = (
"Use this tool to send an email with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.send()
output = "Message sent: " + str(message)
return output
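# --- Usage sketch (illustrative) ---
# O365BaseTool wires up an authenticated O365 Account; the credential
# setup is environment-specific, so only the call shape is shown here,
# with placeholder addresses.
# tool = O365SendMessage()
# result = tool.run(
#     {
#         "body": "Hello from the send_email tool.",
#         "to": ["recipient@example.com"],
#         "subject": "Test message",
#     }
# )
# print(result)  # "Message sent: ..."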
|
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to be sent.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365SendMessage(O365BaseTool): # type: ignore[override]
"""Send an email in Office 365."""
name: str = "send_email"
description: str = (
"Use this tool to send an email with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.send()
output = "Message sent: " + str(message)
return output
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str
class EmbeddingResponseModel(BaseDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(
EmbeddingResponseModel(id=doc.id, embeddings=np.random.random((1, 64)))
)
return DocList[EmbeddingResponseModel](ret)
|
import numpy as np
from docarray import BaseDoc, DocList
from docarray.typing import NdArray
from pydantic import Field
from jina import Executor, requests
class TextDoc(BaseDoc):
text: str
class EmbeddingResponseModel(BaseDoc):
embeddings: NdArray = Field(description="The embedding of the texts", default=[])
class Config(BaseDoc.Config):
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {NdArray: lambda v: v.tolist()}
class SampleExecutor(Executor):
@requests(on="/encode")
def foo(self, docs: DocList[TextDoc], **kwargs) -> DocList[EmbeddingResponseModel]:
ret = []
for doc in docs:
ret.append(EmbeddingResponseModel(embeddings=np.random.random((1, 64))))
return DocList[EmbeddingResponseModel](ret)
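# --- Usage sketch (illustrative) ---
# Serving the executor locally with a Jina Deployment and requesting
# embeddings for one text; the port is an arbitrary choice.
if __name__ == "__main__":
    from jina import Deployment
    with Deployment(uses=SampleExecutor, port=12345) as dep:
        responses = dep.post(
            on="/encode",
            inputs=DocList[TextDoc]([TextDoc(text="hello world")]),
            return_type=DocList[EmbeddingResponseModel],
        )
        print(responses[0].embeddings.shape)  # (1, 64)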
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
return model
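# --- Usage sketch (illustrative) ---
# A minimal forward pass with the pretrained weights; the random input
# stands in for a properly preprocessed 224x224 image batch.
if __name__ == "__main__":
    model = alexnet(weights=AlexNet_Weights.IMAGENET1K_V1)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # torch.Size([1, 1000])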
|
from functools import partial
from typing import Any, Optional
import torch
import torch.nn as nn
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface
__all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
class AlexNet(nn.Module):
def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
_log_api_usage_once(self)
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(p=dropout),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
class AlexNet_Weights(WeightsEnum):
IMAGENET1K_V1 = Weights(
url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 61100840,
"min_size": (63, 63),
"categories": _IMAGENET_CATEGORIES,
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
"_metrics": {
"ImageNet-1K": {
"acc@1": 56.522,
"acc@5": 79.066,
}
},
"_ops": 0.714,
"_file_size": 233.087,
"_docs": """
These weights reproduce closely the results of the paper using a simplified training recipe.
""",
},
)
DEFAULT = IMAGENET1K_V1
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
"""AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.
.. note::
AlexNet was originally introduced in the `ImageNet Classification with
Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
paper. Our implementation is based instead on the "One weird trick"
paper above.
Args:
weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
pretrained weights to use. See
:class:`~torchvision.models.AlexNet_Weights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the
download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.AlexNet``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
for more details about this class.
.. autoclass:: torchvision.models.AlexNet_Weights
:members:
"""
weights = AlexNet_Weights.verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
model = AlexNet(**kwargs)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
**kwargs: Any,
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
        # Once the prefix has been seen, stream all subsequent tokens
if self.answer_reached:
self.queue.put_nowait(token)
|
from __future__ import annotations
from typing import Any, Optional
from langchain_core.outputs import LLMResult
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"]
class AsyncFinalIteratorCallbackHandler(AsyncIteratorCallbackHandler):
"""Callback handler that returns an async iterator.
Only the final output of the agent will be iterated.
"""
def append_to_last_tokens(self, token: str) -> None:
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: Optional[list[str]] = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate AsyncFinalIteratorCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
# If two calls are made in a row, this resets the state
self.done.clear()
self.answer_reached = False
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
if self.answer_reached:
self.done.set()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
self.queue.put_nowait(t)
return
        # Once the prefix has been seen, stream all subsequent tokens
if self.answer_reached:
self.queue.put_nowait(token)
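# --- Usage sketch (illustrative) ---
# The handler is attached to a streaming agent/LLM run; once the
# "Final Answer:" prefix is detected, tokens become available through
# aiter(). Agent construction is omitted as it is application-specific.
# handler = AsyncFinalIteratorCallbackHandler()
# task = asyncio.create_task(agent.arun(question, callbacks=[handler]))
# async for token in handler.aiter():
#     print(token, end="", flush=True)
# await task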
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Dict, Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
        - After training, the original model parameters are actually saved
          in the ema field.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
priority = 'NORMAL'
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self,
runner,
metrics: Optional[Dict[str, float]] = None) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
# save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
self._swap_ema_parameters()
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
self._swap_ema_parameters()
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_parameters()
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
|
# Copyright (c) OpenMMLab. All rights reserved.
import itertools
from typing import Optional
from mmengine.model import is_model_wrapper
from mmengine.registry import HOOKS, MODELS
from .hook import DATA_BATCH, Hook
@HOOKS.register_module()
class EMAHook(Hook):
"""A Hook to apply Exponential Moving Average (EMA) on the model during
training.
Note:
- EMAHook takes priority over CheckpointHook.
        - After training, the original model parameters are actually saved
          in the ema field.
Args:
ema_type (str): The type of EMA strategy to use. You can find the
supported strategies in ``mmengine.model.averaged_model``.
Defaults to 'ExponentialMovingAverage'
"""
def __init__(self, ema_type: str = 'ExponentialMovingAverage', **kwargs):
self.ema_cfg = dict(type=ema_type, **kwargs)
def before_run(self, runner) -> None:
"""Create an ema copy of the model."""
model = runner.model
if is_model_wrapper(model):
model = model.module
self.src_model = model
self.ema_model = MODELS.build(
self.ema_cfg, default_args=dict(model=self.src_model))
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Update ema parameter."""
self.ema_model.update_parameters(self.src_model)
def before_val_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
validation."""
self._swap_ema_parameters()
def after_val_epoch(self, runner) -> None:
"""We recover source model's parameter from ema model after
validation."""
self._swap_ema_parameters()
def before_test_epoch(self, runner) -> None:
"""We load parameter values from ema model to source model before
test."""
self._swap_ema_parameters()
def after_test_epoch(self, runner) -> None:
"""We recover source model's parameter from ema model after test."""
self._swap_ema_parameters()
def before_save_checkpoint(self, runner, checkpoint: dict) -> None:
"""Save ema parameters to checkpoint."""
# save ema parameters to the source model's state dict so that we can
# directly load the averaged model weights for deployment.
self._swap_ema_parameters()
checkpoint['ema_state_dict'] = self.ema_model.state_dict()
self._swap_ema_parameters()
def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
"""Resume ema parameters from checkpoint."""
self.ema_model.load_state_dict(checkpoint['ema_state_dict'])
# The original model parameters are actually saved in ema field.
# swap the weights back to resume ema state.
self._swap_ema_parameters()
def _swap_ema_parameters(self) -> None:
"""Swap the parameter of model with ema_model."""
avg_param = (
itertools.chain(self.ema_model.module.parameters(),
self.ema_model.module.buffers())
if self.ema_model.update_buffers else
self.ema_model.module.parameters())
src_param = (
itertools.chain(self.src_model.parameters(),
self.src_model.buffers())
if self.ema_model.update_buffers else self.src_model.parameters())
for p_avg, p_src in zip(avg_param, src_param):
tmp = p_avg.data.clone()
p_avg.data.copy_(p_src.data)
p_src.data.copy_(tmp)
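# --- Usage sketch (illustrative) ---
# The hook is registered in HOOKS, so it can be enabled from a Runner
# config; extra kwargs (e.g. momentum) are forwarded to the EMA model.
# custom_hooks = [
#     dict(type='EMAHook', ema_type='ExponentialMovingAverage', momentum=0.0002),
# ]
# runner = Runner(..., custom_hooks=custom_hooks)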
|
from __future__ import annotations
from torch import Tensor, nn
from sentence_transformers.models.Module import Module
class Dropout(Module):
"""Dropout layer.
Args:
dropout: Sets a dropout value for dense layer.
"""
config_keys: list[str] = ["dropout"]
def __init__(self, dropout: float = 0.2):
super().__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
self.save_config(output_path)
|
from __future__ import annotations
import json
import os
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
dropout: Sets a dropout value for dense layer.
"""
def __init__(self, dropout: float = 0.2):
super().__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
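# --- Usage sketch (illustrative) ---
# The layer operates on the "sentence_embedding" entry of the features
# dict; dropout only has an effect in training mode.
if __name__ == "__main__":
    import torch
    layer = Dropout(dropout=0.5)
    layer.train()
    features = {"sentence_embedding": torch.ones(2, 8)}
    print(layer(features)["sentence_embedding"])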
|
import json
import logging
import os
from typing import Dict, List, Literal
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(
        self, texts: List[str], **kwargs
    ) -> Dict[Literal["sentence_embedding"], torch.Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(
self, tokenized_texts: List[List[int]], pad_seq_length: int = 0
) -> Dict[Literal["sentence_embedding"], torch.Tensor]:
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
|
import json
import logging
import os
from typing import Dict, List
import torch
from torch import Tensor, nn
from .tokenizer import WhitespaceTokenizer
logger = logging.getLogger(__name__)
class BoW(nn.Module):
"""Implements a Bag-of-Words (BoW) model to derive sentence embeddings.
A weighting can be added to allow the generation of tf-idf vectors. The output vector has the size of the vocab.
"""
def __init__(
self,
vocab: List[str],
word_weights: Dict[str, float] = {},
unknown_word_weight: float = 1,
cumulative_term_frequency: bool = True,
):
super(BoW, self).__init__()
vocab = list(set(vocab)) # Ensure vocab is unique
self.config_keys = ["vocab", "word_weights", "unknown_word_weight", "cumulative_term_frequency"]
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
# Maps wordIdx -> word weight
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if word in word_weights:
weight = word_weights[word]
elif word.lower() in word_weights:
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logger.info(
"{} out of {} words without a weighting value. Set weight to {}".format(
num_unknown_words, len(vocab), unknown_word_weight
)
)
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[str, Tensor]):
# Nothing to do, everything is done in get_sentence_features
return features
    def tokenize(self, texts: List[str], **kwargs) -> Dict[str, Tensor]:
tokenized = [self.tokenizer.tokenize(text, **kwargs) for text in texts]
return self.get_sentence_features(tokenized)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokenized_texts: List[List[int]], pad_seq_length: int = 0):
vectors = []
for tokens in tokenized_texts:
vector = torch.zeros(self.get_sentence_embedding_dimension(), dtype=torch.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
vectors.append(vector)
return {"sentence_embedding": torch.stack(vectors)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
return BoW(**config)
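# --- Usage sketch (illustrative) ---
# A tiny vocabulary with the default weight of 1 per word; each sentence
# becomes a term-frequency vector of size len(vocab).
if __name__ == "__main__":
    bow = BoW(vocab=["hello", "world", "foo"])
    features = bow.tokenize(["hello world hello", "foo"])
    print(features["sentence_embedding"])  # shape: (2, 3)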
|
from keras.src.export.onnx import export_onnx
from keras.src.export.saved_model import ExportArchive
from keras.src.export.saved_model import export_saved_model
from keras.src.export.tfsm_layer import TFSMLayer
|
from keras.src.export.export_lib import ExportArchive
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(
self, query: 'WeaviateArrayType', limit=10, filter: Optional[Dict] = None
):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
query_builder = (
self._client.query.get(self._class_name, '_serialized')
.with_additional(['id', 'certainty'])
.with_limit(limit)
.with_near_vector(query_dict)
)
if filter:
query_builder = query_builder.with_where(filter)
results = query_builder.do()
docs = []
if 'errors' in results:
errors = '\n'.join(map(lambda error: error['message'], results['errors']))
raise ValueError(
f'find failed, please check your filter query. Errors: \n{errors}'
)
found_results = results.get('data', {}).get('Get', {}).get(self._class_name, [])
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in found_results:
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _filter(
self,
filter: Dict,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (Weaviate `where` filter).
:param filter: the input filter to apply in each stored document
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
if not filter:
return self
results = (
self._client.query.get(self._class_name, '_serialized')
.with_additional('id')
.with_where(filter)
.do()
)
docs = []
if 'errors' in results:
errors = '\n'.join(map(lambda error: error['message'], results['errors']))
raise ValueError(
f'filter failed, please check your filter query. Errors: \n{errors}'
)
found_results = results.get('data', {}).get('Get', {}).get(self._class_name, [])
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in found_results:
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'WeaviateArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be stored in Weaviate. This includes any from the list '[np.ndarray, tensorflow.Tensor, torch.Tensor, Sequence[float]]'
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://weaviate.io/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
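# --- Worked note (illustrative) ---
# Weaviate returns certainty in [0, 1]; the mapping used above converts it
# to cosine similarity in [-1, 1]: e.g. certainty 0.95 -> 2 * 0.95 - 1 = 0.9.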
|
from typing import (
TYPE_CHECKING,
TypeVar,
Sequence,
List,
Dict,
Optional,
)
import numpy as np
from .... import Document, DocumentArray
from ....math import ndarray
from ....math.helper import EPSILON
from ....math.ndarray import to_numpy_array
from ....score import NamedScore
if TYPE_CHECKING:
import tensorflow
import torch
WeaviateArrayType = TypeVar(
'WeaviateArrayType',
np.ndarray,
tensorflow.Tensor,
torch.Tensor,
Sequence[float],
)
class FindMixin:
def _find_similar_vectors(
self, query: 'WeaviateArrayType', limit=10, filter: Optional[Dict] = None
):
query = to_numpy_array(query)
is_all_zero = np.all(query == 0)
if is_all_zero:
query = query + EPSILON
query_dict = {'vector': query}
query_builder = (
self._client.query.get(self._class_name, '_serialized')
.with_additional(['id', 'certainty'])
.with_limit(limit)
.with_near_vector(query_dict)
)
if filter:
query_builder = query_builder.with_where(filter)
results = query_builder.do()
docs = []
if 'errors' in results:
errors = '\n'.join(map(lambda error: error['message'], results['errors']))
raise ValueError(
f'find failed, please check your filter query. Errors: \n{errors}'
)
found_results = results.get('data', {}).get('Get', {}).get(self._class_name, [])
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in found_results:
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
certainty = result['_additional']['certainty']
doc.scores['weaviate_certainty'] = NamedScore(value=certainty)
if certainty is None:
doc.scores['cosine_similarity'] = NamedScore(value=None)
else:
doc.scores['cosine_similarity'] = NamedScore(value=2 * certainty - 1)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _filter(
self,
filter: Dict,
) -> 'DocumentArray':
"""Returns a subset of documents by filtering by the given filter (Weaviate `where` filter).
:param filter: the input filter to apply in each stored document
:return: a `DocumentArray` containing the `Document` objects that verify the filter.
"""
if not filter:
return self
results = (
self._client.query.get(self._class_name, '_serialized')
.with_additional('id')
.with_where(filter)
.do()
)
docs = []
if 'errors' in results:
errors = '\n'.join(map(lambda error: error['message'], results['errors']))
raise ValueError(
f'filter failed, please check your filter query. Errors: \n{errors}'
)
found_results = results.get('data', {}).get('Get', {}).get(self._class_name, [])
# The serialized document is stored in results['data']['Get'][self._class_name]
for result in found_results:
doc = Document.from_base64(result['_serialized'], **self._serialize_config)
doc.tags['wid'] = result['_additional']['id']
docs.append(doc)
return DocumentArray(docs)
def _find(
self,
query: 'WeaviateArrayType',
limit: int = 10,
filter: Optional[Dict] = None,
**kwargs,
) -> List['DocumentArray']:
"""Returns approximate nearest neighbors given a batch of input queries.
:param query: input supported to be stored in Weaviate. This includes any from the list '[np.ndarray, tensorflow.Tensor, torch.Tensor, Sequence[float]]'
:param limit: number of retrieved items
:param filter: filter query used for pre-filtering
:return: DocumentArray containing the closest documents to the query if it is a single query, otherwise a list of DocumentArrays containing
the closest Document objects for each of the queries in `query`.
Note: Weaviate returns `certainty` values. To get cosine similarities one needs to use `cosine_sim = 2*certainty - 1` as explained here:
https://www.semi.technology/developers/weaviate/current/more-resources/faq.html#q-how-do-i-get-the-cosine-similarity-from-weaviates-certainty
"""
num_rows, _ = ndarray.get_array_rows(query)
if num_rows == 1:
return [self._find_similar_vectors(query, limit=limit, filter=filter)]
else:
closest_docs = []
for q in query:
da = self._find_similar_vectors(q, limit=limit, filter=filter)
closest_docs.append(da)
return closest_docs
|
import pytest
from jina.excepts import NoAvailablePortError
|
import pytest
from jina.excepts import NoAvailablePortError
@pytest.fixture(scope='function', autouse=True)
def patched_random_port(mocker):
print('using random port fixture...')
used_ports = set()
from jina.helper import random_port
def _random_port():
for i in range(10):
_port = random_port()
if _port is not None and _port not in used_ports:
used_ports.add(_port)
return _port
raise NoAvailablePortError
mocker.patch('jina.helper.random_port', new_callable=lambda: _random_port)
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg_cuda():
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device="cuda")
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg_cuda()
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import os
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from torchvision.io import read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.ndim != 3 or img_jpg.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.ndim != 3 or img_png.numel() < 100:
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms()
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
if torch.cuda.is_available():
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
# mypy: allow-untyped-defs
__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
"""Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.get(1)
2
>>> transitive_get(1, d)
4
"""
while hashable(key) and key in d:
key = d[key]
return key
def raises(err, lamda): # codespell:ignore lamda
try:
lamda() # codespell:ignore lamda
return False
except err:
return True
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
"""Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> # xdoctest: +SKIP
>>> _toposort({1: (2, 3), 2: (3,)})
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = {k: set(val) for k, val in incoming_edges.items()}
S = {v for v in edges if v not in incoming_edges}
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {"a": (1, 2), "b": (2, 3), "c": ()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. Because we iterate over the
        input dict, the output of this function depends on the dict
        order, so the output order should be considered
        nondeterministic.
"""
result = {} # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, ()) + (key,)
return result
def xfail(func):
try:
func()
raise Exception("XFailed test passed") # pragma:nocover # noqa: TRY002
except Exception:
pass
def freeze(d):
"""Freeze container to hashable form
>>> freeze(1)
1
>>> freeze([1, 2])
(1, 2)
>>> freeze({1: 2}) # doctest: +SKIP
frozenset([(1, 2)])
"""
if isinstance(d, dict):
return frozenset(map(freeze, d.items()))
if isinstance(d, set):
return frozenset(map(freeze, d))
if isinstance(d, (tuple, list)):
return tuple(map(freeze, d))
return d
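# --- Usage sketch (illustrative) ---
# A small dependency graph: "a" must come before "b", "b" before "c".
if __name__ == "__main__":
    deps = {"a": ("b",), "b": ("c",), "c": ()}
    print(_toposort(deps))  # ['a', 'b', 'c']
    print(reverse_dict(deps))  # {'b': ('a',), 'c': ('b',)}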
|
# mypy: allow-untyped-defs
__all__ = ["hashable", "transitive_get", "raises", "reverse_dict", "xfail", "freeze"]
def hashable(x):
try:
hash(x)
return True
except TypeError:
return False
def transitive_get(key, d):
"""Transitive dict.get
>>> d = {1: 2, 2: 3, 3: 4}
>>> d.get(1)
2
>>> transitive_get(1, d)
4
"""
while hashable(key) and key in d:
key = d[key]
return key
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
"""Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> # xdoctest: +SKIP
>>> _toposort({1: (2, 3), 2: (3,)})
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] http://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = {k: set(val) for k, val in incoming_edges.items()}
S = {v for v in edges if v not in incoming_edges}
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {"a": (1, 2), "b": (2, 3), "c": ()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict order is not deterministic. Because we iterate over the
        input dict, the output of this function depends on the dict
        order, so the output order should be considered
        nondeterministic.
"""
result = {} # type: ignore[var-annotated]
for key in d:
for val in d[key]:
result[val] = result.get(val, ()) + (key,)
return result
def xfail(func):
try:
func()
raise Exception("XFailed test passed") # pragma:nocover # noqa: TRY002
except Exception:
pass
def freeze(d):
"""Freeze container to hashable form
>>> freeze(1)
1
>>> freeze([1, 2])
(1, 2)
>>> freeze({1: 2}) # doctest: +SKIP
frozenset([(1, 2)])
"""
if isinstance(d, dict):
return frozenset(map(freeze, d.items()))
if isinstance(d, set):
return frozenset(map(freeze, d))
if isinstance(d, (tuple, list)):
return tuple(map(freeze, d))
return d
|
import os
import urllib
import pytest
from pydantic import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
def test_json_schema():
schema_json_of(TextUrl)
def test_dump_json():
url = parse_obj_as(TextUrl, REMOTE_TXT)
orjson_dumps(url)
|
import os
import urllib
import pytest
from pydantic import parse_obj_as
from docarray.typing import TextUrl
REMOTE_TXT = 'https://de.wikipedia.org/wiki/Brixen'
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOCAL_TXT = os.path.join(CUR_DIR, '..', '..', '..', 'toydata', 'penal_colony.txt')
@pytest.mark.parametrize(
'url,expected_beginning',
[(REMOTE_TXT, '<!DOCTYPE html>'), (LOCAL_TXT, '“It’s a peculiar apparatus,”')],
)
def test_load(url, expected_beginning):
uri = parse_obj_as(TextUrl, url)
txt = uri.load()
assert txt.startswith(expected_beginning)
@pytest.mark.parametrize('url', [REMOTE_TXT, LOCAL_TXT])
def test_load_to_bytes(url):
uri = parse_obj_as(TextUrl, url)
txt_bytes = uri.load_to_bytes()
assert isinstance(txt_bytes, bytes)
def test_proto_text_url():
uri = parse_obj_as(TextUrl, LOCAL_TXT)
uri._to_node_protobuf()
def test_load_timeout():
url = parse_obj_as(TextUrl, REMOTE_TXT)
with pytest.raises(urllib.error.URLError):
_ = url.load(timeout=0.001)
with pytest.raises(urllib.error.URLError):
_ = url.load_to_bytes(timeout=0.001)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
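# Editor's note (hypothetical usage): tests pull these fixtures in by name,
# e.g. `def test_gpu_image(build_docker_image_gpu): ...`; the session scope
# means each image is built at most once per test run.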
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import subprocess
from pathlib import Path
import pytest
@pytest.fixture(scope='session')
def docker_image_name() -> str:
return Path(__file__).parents[1].stem.lower()
@pytest.fixture(scope='session')
def build_docker_image(docker_image_name: str) -> str:
subprocess.run(['docker', 'build', '-t', docker_image_name, '.'], check=True)
return docker_image_name
@pytest.fixture(scope='session')
def build_docker_image_gpu(docker_image_name: str) -> str:
image_name = f'{docker_image_name}:gpu'
subprocess.run(
['docker', 'build', '-t', image_name, '-f', 'Dockerfile.gpu', '.'], check=True
)
return image_name
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from ...laser_encoder import LaserEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_laser_batch(docs_generator):
encoder = LaserEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (1024,)
def test_traversal_path():
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
    docs[0].chunks = [
        Document(id='chunk11', text=text),
        Document(id='chunk12', text=text),
        Document(id='chunk13', text=text),
    ]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
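    # Editor's note: traversal paths address the tree above: 'r' selects the
    # root document, 'c' its 3 chunks, 'cc' the 2 chunks of the first chunk.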
encoder = LaserEncoder()
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['c']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 0]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (1024,)
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['cc']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 2]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (1024,)
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import pytest
from jina import Document, DocumentArray
from jinahub.encoder.laser_encoder import LaserEncoder
@pytest.fixture()
def docs_generator():
return DocumentArray((Document(text='random text') for _ in range(30)))
def test_laser_batch(docs_generator):
encoder = LaserEncoder()
docs = docs_generator
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert len(docs.get_attributes('embedding')) == 30
assert docs[0].embedding.shape == (1024,)
def test_traversal_path():
text = 'blah'
docs = DocumentArray([Document(id='root1', text=text)])
    docs[0].chunks = [
        Document(id='chunk11', text=text),
        Document(id='chunk12', text=text),
        Document(id='chunk13', text=text),
    ]
docs[0].chunks[0].chunks = [
Document(id='chunk111', text=text),
Document(id='chunk112', text=text),
]
encoder = LaserEncoder()
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['c']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 0]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (1024,)
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['cc']})
for path, count in [[['r'], 0], [['c'], 3], [['cc'], 2]]:
assert len(docs.traverse_flat(path).get_attributes('embedding')) == count
if count > 0:
assert docs.traverse_flat(path).get_attributes('embedding')[0].shape == (1024,)
def test_no_documents():
encoder = LaserEncoder()
docs = []
encoder.encode(docs, parameters={'batch_size': 10, 'traversal_paths': ['r']})
assert not docs
|
# Copyright (c) OpenMMLab. All rights reserved.
import pickle
from collections import OrderedDict
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_scalars) == 0
        assert len(message_hub.runtime_info) == 0
# The type of log_scalars's value must be `HistoryBuffer`.
with pytest.raises(AssertionError):
MessageHub('hello', log_scalars=OrderedDict(a=1))
        # keys in `resumed_keys` must match the recorded runtime_info keys.
with pytest.raises(AssertionError):
MessageHub(
'hello',
runtime_info=OrderedDict(iter=1),
resumed_keys=OrderedDict(iters=False))
def test_update_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# test create target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
log_buffer = message_hub.log_scalars['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `HistoryBuffer` by name
message_hub.update_scalar('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.get_instance('mmengine')
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_scalar(self):
message_hub = MessageHub.get_instance('mmengine')
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_scalar('unknown')
# test get log_buffer as wished
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_scalar('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_scalar('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.get_instance('mmengine')
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_scalars(self):
message_hub = MessageHub.get_instance('mmengine')
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_scalars(log_dict)
loss = message_hub.get_scalar('loss')
loss_cls = message_hub.get_scalar('loss_cls')
loss_bbox = message_hub.get_scalar('loss_bbox')
loss_iou = message_hub.get_scalar('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
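        # Editor's note: for dict inputs, mean() divides the accumulated value
        # by the accumulated count, hence 1 / 2 == 0.5 for loss_iou above.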
with pytest.raises(TypeError):
loss_dict = dict(error_type=[])
message_hub.update_scalars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_scalars(loss_dict)
def test_getstate(self):
message_hub = MessageHub.get_instance('name')
# update log_scalars.
message_hub.update_scalar('loss', 0.1)
message_hub.update_scalar('lr', 0.1, resumed=False)
# update runtime information
message_hub.update_info('iter', 1, resumed=True)
message_hub.update_info('feat', [1, 2, 3], resumed=False)
obj = pickle.dumps(message_hub)
instance = pickle.loads(obj)
with pytest.raises(KeyError):
instance.get_info('feat')
with pytest.raises(KeyError):
instance.get_info('lr')
instance.get_info('iter')
instance.get_scalar('loss')
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmengine import MessageHub
class TestMessageHub:
def test_init(self):
message_hub = MessageHub('name')
assert message_hub.instance_name == 'name'
assert len(message_hub.log_buffers) == 0
        assert len(message_hub.runtime_info) == 0
def test_update_log(self):
message_hub = MessageHub.get_instance('mmengine')
# test create target `LogBuffer` by name
message_hub.update_log('name', 1)
log_buffer = message_hub.log_buffers['name']
assert (log_buffer._log_history == np.array([1])).all()
# test update target `LogBuffer` by name
message_hub.update_log('name', 1)
assert (log_buffer._log_history == np.array([1, 1])).all()
# unmatched string will raise a key error
def test_update_info(self):
message_hub = MessageHub.get_instance('mmengine')
# test runtime value can be overwritten.
message_hub.update_info('key', 2)
assert message_hub.runtime_info['key'] == 2
message_hub.update_info('key', 1)
assert message_hub.runtime_info['key'] == 1
def test_get_log_buffers(self):
message_hub = MessageHub.get_instance('mmengine')
# Get undefined key will raise error
with pytest.raises(KeyError):
message_hub.get_log('unknown')
# test get log_buffer as wished
log_history = np.array([1, 2, 3, 4, 5])
count = np.array([1, 1, 1, 1, 1])
for i in range(len(log_history)):
message_hub.update_log('test_value', float(log_history[i]),
int(count[i]))
recorded_history, recorded_count = \
message_hub.get_log('test_value').data
assert (log_history == recorded_history).all()
assert (recorded_count == count).all()
def test_get_runtime(self):
message_hub = MessageHub.get_instance('mmengine')
with pytest.raises(KeyError):
message_hub.get_info('unknown')
recorded_dict = dict(a=1, b=2)
message_hub.update_info('test_value', recorded_dict)
assert message_hub.get_info('test_value') == recorded_dict
def test_get_log_vars(self):
message_hub = MessageHub.get_instance('mmengine')
log_dict = dict(
loss=1,
loss_cls=torch.tensor(2),
loss_bbox=np.array(3),
loss_iou=dict(value=1, count=2))
message_hub.update_log_vars(log_dict)
loss = message_hub.get_log('loss')
loss_cls = message_hub.get_log('loss_cls')
loss_bbox = message_hub.get_log('loss_bbox')
loss_iou = message_hub.get_log('loss_iou')
assert loss.current() == 1
assert loss_cls.current() == 2
assert loss_bbox.current() == 3
assert loss_iou.mean() == 0.5
with pytest.raises(TypeError):
loss_dict = dict(error_type=[])
message_hub.update_log_vars(loss_dict)
with pytest.raises(AssertionError):
loss_dict = dict(error_type=dict(count=1))
message_hub.update_log_vars(loss_dict)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
extract_from_images_with_rapidocr,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"extract_from_images_with_rapidocr": (
"langchain_community.document_loaders.parsers.pdf"
),
"PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PDFMinerParser": "langchain_community.document_loaders.parsers.pdf",
"PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
"PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf",
"AmazonTextractPDFParser": "langchain_community.document_loaders.parsers.pdf",
"DocumentIntelligenceParser": "langchain_community.document_loaders.parsers.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AmazonTextractPDFParser",
"DocumentIntelligenceParser",
"PDFMinerParser",
"PDFPlumberParser",
"PyMuPDFParser",
"PyPDFParser",
"PyPDFium2Parser",
"extract_from_images_with_rapidocr",
]
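# Editor's note (hedged; the exact import path depends on where this shim
# lives): importing a routed name resolves it from langchain_community at
# runtime and emits a deprecation warning, e.g.
#   from langchain.document_loaders.parsers.pdf import PyPDFParser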
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.pdf import (
AmazonTextractPDFParser,
DocumentIntelligenceParser,
PDFMinerParser,
PDFPlumberParser,
PyMuPDFParser,
PyPDFium2Parser,
PyPDFParser,
extract_from_images_with_rapidocr,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"extract_from_images_with_rapidocr": (
"langchain_community.document_loaders.parsers.pdf"
),
"PyPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PDFMinerParser": "langchain_community.document_loaders.parsers.pdf",
"PyMuPDFParser": "langchain_community.document_loaders.parsers.pdf",
"PyPDFium2Parser": "langchain_community.document_loaders.parsers.pdf",
"PDFPlumberParser": "langchain_community.document_loaders.parsers.pdf",
"AmazonTextractPDFParser": "langchain_community.document_loaders.parsers.pdf",
"DocumentIntelligenceParser": "langchain_community.document_loaders.parsers.pdf",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"extract_from_images_with_rapidocr",
"PyPDFParser",
"PDFMinerParser",
"PyMuPDFParser",
"PyPDFium2Parser",
"PDFPlumberParser",
"AmazonTextractPDFParser",
"DocumentIntelligenceParser",
]
|
"""
Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils import infer_torch_device
class PptxReader(BaseReader):
"""
Powerpoint parser.
Extract text, caption images, and specify slides.
"""
def __init__(self) -> None:
"""Init parser."""
try:
import torch # noqa
from PIL import Image # noqa
from pptx import Presentation # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the PptxReader: "
"`pip install torch transformers python-pptx Pillow`"
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.parser_config = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image(self, tmp_image_file: str) -> str:
"""Generate text caption of image."""
from PIL import Image
model = self.parser_config["model"]
feature_extractor = self.parser_config["feature_extractor"]
tokenizer = self.parser_config["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(tmp_image_file)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
from pptx import Presentation
if fs:
with fs.open(file) as f:
presentation = Presentation(f)
else:
presentation = Presentation(file)
result = ""
for i, slide in enumerate(presentation.slides):
result += f"\n\nSlide #{i}: \n"
for shape in slide.shapes:
if hasattr(shape, "image"):
image = shape.image
# get image "file" contents
image_bytes = image.blob
# temporarily save the image to feed into model
f = tempfile.NamedTemporaryFile("wb", delete=False)
try:
f.write(image_bytes)
f.close()
result += f"\n Image: {self.caption_image(f.name)}\n\n"
finally:
os.unlink(f.name)
if hasattr(shape, "text"):
result += f"{shape.text}\n"
return [Document(text=result, metadata=extra_info or {})]
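# Editor's usage sketch (hypothetical path; requires the optional dependencies
# named in __init__ and downloads the captioning weights on first use):
#   reader = PptxReader()
#   docs = reader.load_data(Path("slides.pptx"))
#   print(docs[0].text[:200])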
|
"""Slides parser.
Contains parsers for .pptx files.
"""
import os
import tempfile
from pathlib import Path
from typing import Dict, List, Optional
from fsspec import AbstractFileSystem
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.core.utils import infer_torch_device
class PptxReader(BaseReader):
"""Powerpoint parser.
Extract text, caption images, and specify slides.
"""
def __init__(self) -> None:
"""Init parser."""
try:
import torch # noqa
from PIL import Image # noqa
from pptx import Presentation # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Please install extra dependencies that are required for "
"the PptxReader: "
"`pip install torch transformers python-pptx Pillow`"
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.parser_config = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image(self, tmp_image_file: str) -> str:
"""Generate text caption of image."""
from PIL import Image
model = self.parser_config["model"]
feature_extractor = self.parser_config["feature_extractor"]
tokenizer = self.parser_config["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(tmp_image_file)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
fs: Optional[AbstractFileSystem] = None,
) -> List[Document]:
"""Parse file."""
from pptx import Presentation
if fs:
with fs.open(file) as f:
presentation = Presentation(f)
else:
presentation = Presentation(file)
result = ""
for i, slide in enumerate(presentation.slides):
result += f"\n\nSlide #{i}: \n"
for shape in slide.shapes:
if hasattr(shape, "image"):
image = shape.image
# get image "file" contents
image_bytes = image.blob
# temporarily save the image to feed into model
f = tempfile.NamedTemporaryFile("wb", delete=False)
try:
f.write(image_bytes)
f.close()
result += f"\n Image: {self.caption_image(f.name)}\n\n"
finally:
os.unlink(f.name)
if hasattr(shape, "text"):
result += f"{shape.text}\n"
return [Document(text=result, metadata=extra_info or {})]
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0,<8.0.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"fsspec": "fsspec<2023.10.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0,<8.0.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
@pytest.fixture(params=[Interface])
def public_class(request: pytest.FixtureRequest) -> type:
return request.param
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
mode = get_mode(metafunc.config)
if "model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface(**mode).available_models]
metafunc.parametrize("model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
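# Editor's note: example invocations for the options wired up above (model id
# and endpoint are hypothetical):
#   pytest --model-id my-embedding-model
#   pytest --all-models --nim-endpoint http://localhost:8000/v1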
|
import pytest
import os
from llama_index.embeddings.nvidia import NVIDIAEmbedding as Interface
from llama_index.embeddings.nvidia.base import DEFAULT_MODEL
from typing import Generator
# this fixture is used to mask the NVIDIA_API_KEY environment variable and restore it
# after the test. it also returns the value of the NVIDIA_API_KEY environment variable
# before it was masked so that it can be used in the test.
@pytest.fixture()
def masked_env_var() -> Generator[str, None, None]:
var = "NVIDIA_API_KEY"
try:
if val := os.environ.get(var, None):
del os.environ[var]
yield val
finally:
if val:
os.environ[var] = val
def pytest_collection_modifyitems(config, items):
if "NVIDIA_API_KEY" not in os.environ:
skip_marker = pytest.mark.skip(
reason="requires NVIDIA_API_KEY environment variable or --nim-endpoint option"
)
for item in items:
if "integration" in item.keywords and not config.getoption(
"--nim-endpoint"
):
item.add_marker(skip_marker)
def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption(
"--all-models",
action="store_true",
help="Run tests across all models",
)
parser.addoption(
"--model-id",
action="store",
help="Run tests for a specific chat model",
)
parser.addoption(
"--nim-endpoint",
type=str,
help="Run tests using NIM mode",
)
def get_mode(config: pytest.Config) -> dict:
nim_endpoint = config.getoption("--nim-endpoint")
if nim_endpoint:
return {"base_url": nim_endpoint}
return {}
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
mode = get_mode(metafunc.config)
if "model" in metafunc.fixturenames:
models = [DEFAULT_MODEL]
if model := metafunc.config.getoption("--model-id"):
models = [model]
elif metafunc.config.getoption("--all-models"):
models = [model.id for model in Interface(**mode).available_models]
metafunc.parametrize("model", models, ids=models)
@pytest.fixture()
def mode(request: pytest.FixtureRequest) -> dict:
return get_mode(request.config)
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
        assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
import logging
import sys
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
# test whether suppress root handlers
logging.root.handlers.append(logging.StreamHandler(sys.stdout))
with JinaLogger('test_logger', suppress_root_logging=False) as logger:
log(logger)
assert len(logging.root.handlers) > 0
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn, encoding='utf-8') as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import glob
import os
import pytest
from jina import Document, Flow
from jina.constants import __uptime__, __windows__
from jina.enums import LogVerbosity
from jina.helper import colored
from jina.logging.logger import JinaLogger
cur_dir = os.path.dirname(os.path.abspath(__file__))
def log(logger: JinaLogger):
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_color_log():
with JinaLogger('test_logger') as logger:
logger.debug('this is test debug message')
logger.info('this is test info message')
logger.info(f'this is test {colored("color", "red")} message')
logger.success('this is test success message')
logger.warning('this is test warning message')
logger.error('this is test error message')
logger.critical('this is test critical message')
def test_logging_syslog():
with JinaLogger(
'test_logger', log_config=os.path.join(cur_dir, 'yaml/syslog.yml')
) as logger:
log(logger)
        assert len(logger.handlers) == (0 if __windows__ else 1)
def test_logging_default():
import logging
import sys
with JinaLogger('test_logger') as logger:
log(logger)
assert len(logger.handlers) == 1
# test whether suppress root handlers
logging.root.handlers.append(logging.StreamHandler(sys.stdout))
with JinaLogger('test_logger', suppress_root_logging=False) as logger:
log(logger)
assert len(logging.root.handlers) > 0
def test_logging_level_yaml(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
fn = os.path.join(cur_dir, f'jina-{__uptime__}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
if os.path.exists(fn):
os.remove(fn)
log(file_logger)
assert file_logger.logger.level == LogVerbosity.from_string('INFO')
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
def test_logging_file(monkeypatch):
monkeypatch.delenv('JINA_LOG_LEVEL', raising=True) # ignore global env
uptime = __uptime__.replace(':', '.') if __windows__ else __uptime__
fn = os.path.join(cur_dir, f'jina-{uptime}.log')
with JinaLogger(
'test_file_logger', log_config=os.path.join(cur_dir, 'yaml/file.yml')
) as file_logger:
log(file_logger)
assert os.path.exists(fn)
with open(fn) as fp:
assert len(fp.readlines()) == 5
for f in glob.glob(cur_dir + '/*.log'):
os.remove(f)
@pytest.mark.slow
def test_logging_quiet(caplog):
# no way to capture logs in multiprocessing
# see discussion here: https://github.com/pytest-dev/pytest/issues/3037#issuecomment-745050393
f = Flow().add(quiet=True).add()
with f:
f.index(Document())
|
import numpy as np
from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
tpfp_imagenet, tpfp_openimages)
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
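# Editor's note: the tpfp_* functions return (tp, fp) arrays shaped
# (num_ious, num_dets); with the single default IoU threshold that is (1, 3)
# for the three detections above.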
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
    # 2 images and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
def test_tpfp_openimages():
det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
[10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
[30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
gt_bboxes = np.array([[10., 10., 30., 30.], [30., 30., 50., 50.]])
    gt_groups_of = np.array([True, False], dtype=bool)
gt_ignore = np.zeros((0, 4))
# Open Images evaluation using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 4)
assert fp.shape == (1, 4)
assert cls_dets.shape == (4, 5)
assert (tp == np.array([[0, 1, 0, 1]])).all()
assert (fp == np.array([[1, 0, 1, 0]])).all()
cls_dets_gt = np.array([[28., 28., 35., 35., 0.97],
[30., 30., 51., 51., 0.96],
[100., 110., 120., 130., 0.15],
[10., 10., 15., 15., 1.]])
assert (cls_dets == cls_dets_gt).all()
# Open Images evaluation not using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=False,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 6)
assert fp.shape == (1, 6)
assert cls_dets.shape == (6, 5)
# Open Images evaluation using group of, and gt is all group of bboxes.
    gt_groups_of = np.array([True, True], dtype=bool)
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert cls_dets.shape == (3, 5)
# Open Images evaluation with empty gt.
gt_bboxes = np.zeros((0, 4))
gt_groups_of = np.empty((0))
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
fp = result[1]
assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()
|
import numpy as np
from mmdet.core.evaluation.mean_ap import eval_map, tpfp_default, tpfp_imagenet
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
    # 2 images and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
import logging
import os
import sys
import tarfile
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as the second parameter and the corpus is capped at approx. 100k docs
corpus_max_size = int(sys.argv[2]) * 1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = "msmarco-data"
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, "collection.tsv")
dev_queries_file = os.path.join(data_folder, "queries.dev.small.tsv")
qrels_filepath = os.path.join(data_folder, "qrels.dev.tsv")
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, "collectionandqueries.tar.gz")
if not os.path.exists(tar_filepath):
logging.info("Download: " + tar_filepath)
util.http_get(
"https://msmarco.z22.web.core.windows.net/msmarcoranking/collectionandqueries.tar.gz", tar_filepath
)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get("https://msmarco.z22.web.core.windows.net/msmarcoranking/qrels.dev.tsv", qrels_filepath)
### Load data
corpus = {} # Our corpus pid => passage
dev_queries = {} # Our dev queries. qid => query
dev_rel_docs = {} # Mapping qid => set with relevant pids
needed_pids = set() # Passage IDs we need
needed_qids = set() # Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding="utf8") as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split("\t")
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding="utf8") as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(
dev_queries,
corpus,
dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev",
)
ir_evaluator(model)
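# Editor's note: example invocation (model name is hypothetical):
#   python eval_msmarco.py msmarco-distilbert-base-v3 500   # corpus capped at ~500k docs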
|
"""
This script runs the evaluation of an SBERT msmarco model on the
MS MARCO dev dataset and reports different performance metrics for cosine similarity & dot-product.
Usage:
python eval_msmarco.py model_name [max_corpus_size_in_thousands]
"""
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation, util, models
import logging
import sys
import os
import tarfile
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
#Name of the SBERT model
model_name = sys.argv[1]
# You can limit the approx. max size of the corpus. Pass 100 as the second parameter and the corpus is capped at approx. 100k docs
corpus_max_size = int(sys.argv[2])*1000 if len(sys.argv) >= 3 else 0
#### Load model
model = SentenceTransformer(model_name)
### Data files
data_folder = 'msmarco-data'
os.makedirs(data_folder, exist_ok=True)
collection_filepath = os.path.join(data_folder, 'collection.tsv')
dev_queries_file = os.path.join(data_folder, 'queries.dev.small.tsv')
qrels_filepath = os.path.join(data_folder, 'qrels.dev.tsv')
### Download files if needed
if not os.path.exists(collection_filepath) or not os.path.exists(dev_queries_file):
tar_filepath = os.path.join(data_folder, 'collectionandqueries.tar.gz')
if not os.path.exists(tar_filepath):
logging.info("Download: "+tar_filepath)
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/collectionandqueries.tar.gz', tar_filepath)
with tarfile.open(tar_filepath, "r:gz") as tar:
tar.extractall(path=data_folder)
if not os.path.exists(qrels_filepath):
util.http_get('https://msmarco.blob.core.windows.net/msmarcoranking/qrels.dev.tsv', qrels_filepath)
### Load data
corpus = {} #Our corpus pid => passage
dev_queries = {} #Our dev queries. qid => query
dev_rel_docs = {} #Mapping qid => set with relevant pids
needed_pids = set() #Passage IDs we need
needed_qids = set() #Query IDs we need
# Load the 6980 dev queries
with open(dev_queries_file, encoding='utf8') as fIn:
for line in fIn:
qid, query = line.strip().split("\t")
dev_queries[qid] = query.strip()
# Load which passages are relevant for which queries
with open(qrels_filepath) as fIn:
for line in fIn:
qid, _, pid, _ = line.strip().split('\t')
if qid not in dev_queries:
continue
if qid not in dev_rel_docs:
dev_rel_docs[qid] = set()
dev_rel_docs[qid].add(pid)
needed_pids.add(pid)
needed_qids.add(qid)
# Read passages
with open(collection_filepath, encoding='utf8') as fIn:
for line in fIn:
pid, passage = line.strip().split("\t")
if pid in needed_pids or corpus_max_size <= 0 or len(corpus) <= corpus_max_size:
corpus[pid] = passage.strip()
## Run evaluator
logging.info("Queries: {}".format(len(dev_queries)))
logging.info("Corpus: {}".format(len(corpus)))
ir_evaluator = evaluation.InformationRetrievalEvaluator(dev_queries, corpus, dev_rel_docs,
show_progress_bar=True,
corpus_chunk_size=100000,
precision_recall_at_k=[10, 100],
name="msmarco dev")
ir_evaluator(model)
|
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
from docarray.utils._internal.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.embedding.tensorflow import ( # noqa F401
TensorFlowEmbedding,
)
__all__.append('TensorFlowEmbedding')
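# Editor's note: the guarded imports above keep torch and tensorflow optional;
# TorchEmbedding / TensorFlowEmbedding only land in __all__ when the
# corresponding framework is importable.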
|
from docarray.typing.tensor.embedding.embedding import AnyEmbedding
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
__all__ = ['NdArrayEmbedding', 'AnyEmbedding']
from docarray.utils.misc import is_tf_available, is_torch_available
torch_available = is_torch_available()
if torch_available:
from docarray.typing.tensor.embedding.torch import TorchEmbedding # noqa F401
__all__.append('TorchEmbedding')
tf_available = is_tf_available()
if tf_available:
from docarray.typing.tensor.embedding.tensorflow import ( # noqa F401
TensorFlowEmbedding,
)
__all__.append('TensorFlowEmbedding')
|
# Licensed to the LF AI & Data foundation under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
from docarray.array.doc_vec.doc_vec import DocVec
__all__ = ['DocList', 'DocVec', 'AnyDocArray']
|
from docarray.array.any_array import AnyDocArray
from docarray.array.doc_list.doc_list import DocList
from docarray.array.doc_vec.doc_vec import DocVec
__all__ = ['DocList', 'DocVec', 'AnyDocArray']
|
import pytest
from typing import Dict, List
from docarray import BaseDoc, DocList
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
class MySimpleDoc(BaseDoc):
title: str
class MyComplexDoc(BaseDoc):
content_dict_doclist: Dict[str, DocList[MySimpleDoc]]
content_dict_list: Dict[str, List[MySimpleDoc]]
aux_dict: Dict[str, int]
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes_complex(protocol, compress):
d = MyComplexDoc(
content_dict_doclist={
'test1': DocList[MySimpleDoc](
[MySimpleDoc(title='123'), MySimpleDoc(title='456')]
)
},
content_dict_list={
'test1': [MySimpleDoc(title='123'), MySimpleDoc(title='456')]
},
aux_dict={'a': 0},
)
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyComplexDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.aux_dict == {'a': 0}
assert len(d2.content_dict_doclist['test1']) == 2
assert d2.content_dict_doclist['test1'][0].title == '123'
assert d2.content_dict_doclist['test1'][1].title == '456'
assert len(d2.content_dict_list['test1']) == 2
assert d2.content_dict_list['test1'][0].title == '123'
assert d2.content_dict_list['test1'][1].title == '456'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64_complex(protocol, compress):
d = MyComplexDoc(
content_dict_doclist={
'test1': DocList[MySimpleDoc](
[MySimpleDoc(title='123'), MySimpleDoc(title='456')]
)
},
content_dict_list={
'test1': [MySimpleDoc(title='123'), MySimpleDoc(title='456')]
},
aux_dict={'a': 0},
)
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyComplexDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.aux_dict == {'a': 0}
assert len(d2.content_dict_doclist['test1']) == 2
assert d2.content_dict_doclist['test1'][0].title == '123'
assert d2.content_dict_doclist['test1'][1].title == '456'
assert len(d2.content_dict_list['test1']) == 2
assert d2.content_dict_list['test1'][0].title == '123'
assert d2.content_dict_list['test1'][1].title == '456'
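# Editor's note: the parametrize matrix above covers every (protocol, compress)
# pair; each test asserts only that the serialize/deserialize round-trip
# reconstructs the original fields.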
|
import pytest
from docarray import BaseDoc
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_bytes(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_bytes(protocol=protocol, compress=compress)
d2 = MyDoc.from_bytes(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
@pytest.mark.parametrize('protocol', ['protobuf', 'pickle'])
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_to_from_base64(protocol, compress):
d = MyDoc(embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png'))
assert d.text == 'hello'
assert d.embedding.tolist() == [1, 2, 3, 4, 5]
assert d.image.url == 'aux.png'
bstr = d.to_base64(protocol=protocol, compress=compress)
d2 = MyDoc.from_base64(bstr, protocol=protocol, compress=compress)
assert d2.text == 'hello'
assert d2.embedding.tolist() == [1, 2, 3, 4, 5]
assert d2.image.url == 'aux.png'
|
import os
import time
import uuid
from contextlib import contextmanager
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config):
old_environ = dict(os.environ)
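    # snapshot the full environment so it can be restored untouched after the test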
os.environ["HF_TOKEN"] = CI_HUB_USER_TOKEN
yield
os.environ.clear()
os.environ.update(old_environ)
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
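# Example usage (hypothetical test; the context manager yields a fresh repo id
# and deletes the repo on exit):
#
#     def test_push(temporary_repo, hf_api, hf_token):
#         with temporary_repo() as repo_id:
#             hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
#             ...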
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
import time
import uuid
from contextlib import contextmanager
from pathlib import Path
from typing import Optional
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder, RepositoryNotFoundError
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"
CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
)
@pytest.fixture
def ci_hub_config(monkeypatch):
monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)
@pytest.fixture
def ci_hub_token_path(monkeypatch):
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
HfFolder.save_token(CI_HUB_USER_TOKEN)
yield
HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
return HfApi(endpoint=CI_HUB_ENDPOINT)
@pytest.fixture(scope="session")
def hf_token():
yield CI_HUB_USER_TOKEN
@pytest.fixture
def cleanup_repo(hf_api):
def _cleanup_repo(repo_id):
hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")
return _cleanup_repo
@pytest.fixture
def temporary_repo(cleanup_repo):
@contextmanager
def _temporary_repo(repo_id: Optional[str] = None):
repo_id = repo_id or f"{CI_HUB_USER}/test-dataset-{uuid.uuid4().hex[:6]}-{int(time.time() * 10e3)}"
try:
yield repo_id
finally:
try:
cleanup_repo(repo_id)
except RepositoryNotFoundError:
pass
return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
repo_name = f"repo_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(text_file),
path_in_repo="data/text_data.txt",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_csv_with_dir_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
repo_name = f"repo_zipped_img_data-{int(time.time() * 10e6)}"
repo_id = f"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=str(zip_image_path),
path_in_repo="data.zip",
repo_id=repo_id,
repo_type="dataset",
)
yield repo_id
try:
hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
return hf_private_dataset_repo_zipped_img_data_
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8))
])
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=[Document(blob=arr_in)],
return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512, )
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)),
return_results=True
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 11], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 11]], 'cc')
]
)
def test_traversal_paths(docs: DocumentArray, docs_per_path: List[List], traversal_paths: str):
    def validate_traversal(expected_docs_per_path: List[List]):
        def validate(res):
            # check every (path, count) pair instead of returning after the first one
            return all(
                len(DocumentArray(res[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
                for path, count in expected_docs_per_path
            )
        return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
try:
from torch_encoder import ImageTorchEncoder
except ImportError:
from jinahub.image.encoder.torch_encoder import ImageTorchEncoder
@pytest.mark.parametrize('arr_in', [
(np.ones((224, 224, 3), dtype=np.uint8)),
(np.ones((100, 100, 3), dtype=np.uint8)),
(np.ones((50, 40, 3), dtype=np.uint8))
])
def test_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=[Document(blob=arr_in)],
return_results=True
)
results_arr = DocumentArray(resp[0].data.docs)
assert len(results_arr) == 1
assert results_arr[0].embedding is not None
assert results_arr[0].embedding.shape == (512, )
def test_with_batch():
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=(Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)),
return_results=True
)
assert len(resp[0].docs.get_attributes('embedding')) == 25
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [['r', 11], ['c', 0], ['cc', 0]], 'r'),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [['r', 0], ['c', 11], ['cc', 0]], 'c'),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [['r', 0], ['c', 0], ['cc', 11]], 'cc')
]
)
def test_traversal_paths(docs: DocumentArray, docs_per_path: List[List], traversal_paths: str):
    def validate_traversal(expected_docs_per_path: List[List]):
        def validate(res):
            # check every (path, count) pair instead of returning after the first one
            return all(
                len(DocumentArray(res[0].docs).traverse_flat([path]).get_attributes('embedding')) == count
                for path, count in expected_docs_per_path
            )
        return validate
flow = Flow().add(uses=ImageTorchEncoder)
with flow:
resp = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': [traversal_paths]},
return_results=True
)
assert validate_traversal(docs_per_path)(resp)
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
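# A sketch of what this loader yields (assuming batch_size=16, pos_neg_ratio=8):
# one identical sentence pair (label 1) per pos_neg_ratio pairs, i.e. each batch
# holds 2 positive pairs and 14 pairs of different sentences (label 0).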
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
import csv
import gzip
import logging
import os
from datetime import datetime
import torch
from sentence_transformers import InputExample, LoggingHandler, SentenceTransformer, losses, models, util
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 16
pos_neg_ratio = 8 # batch_size must be divisible by pos_neg_ratio
epochs = 1
max_seq_length = 75
# Save path to store our model
model_save_path = "output/train_stsb_ct-{}-{}".format(model_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
################# Train sentences #################
# We use 1 Million sentences from Wikipedia to train our model
wikipedia_dataset_path = "data/wiki1m_for_simcse.txt"
if not os.path.exists(wikipedia_dataset_path):
util.http_get(
"https://huggingface.co/datasets/princeton-nlp/datasets-for-simcse/resolve/main/wiki1m_for_simcse.txt",
wikipedia_dataset_path,
)
# train_sentences is simply your list of sentences
train_sentences = []
with open(wikipedia_dataset_path, "r", encoding="utf8") as fIn:
for line in fIn:
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
################# Download and load STSb #################
data_folder = "data/stsbenchmark"
sts_dataset_path = f"{data_folder}/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row["sentence1"], row["sentence2"]], label=score)
if row["split"] == "dev":
dev_samples.append(inp_example)
elif row["split"] == "test":
test_samples.append(inp_example)
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name="sts-dev")
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name="sts-test")
################# Initialize an SBERT model #################
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# For ContrastiveTension we need a special data loader to construct batches with the desired properties
train_dataloader = losses.ContrastiveTensionDataLoader(
train_sentences, batch_size=batch_size, pos_neg_ratio=pos_neg_ratio
)
# As loss, we use losses.ContrastiveTensionLoss
train_loss = losses.ContrastiveTensionLoss(model)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=1,
evaluation_steps=1000,
weight_decay=0,
warmup_steps=0,
optimizer_class=torch.optim.RMSprop,
optimizer_params={"lr": 1e-5},
output_path=model_save_path,
use_amp=False, # Set to True, if your GPU has optimized FP16 cores
)
########### Load the model and evaluate on test set
model = SentenceTransformer(model_save_path)
test_evaluator(model)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .manager import ManagerMeta, ManagerMixin
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint', 'ManagerMeta', 'ManagerMixin'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .hub import load_url
from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
find_latest_checkpoint, has_method,
import_modules_from_strings, is_list_of,
is_method_overridden, is_seq_of, is_str, is_tuple_of,
iter_cast, list_cast, mmcv_full_available,
requires_executable, requires_package, slice_list,
to_1tuple, to_2tuple, to_3tuple, to_4tuple, to_ntuple,
tuple_cast)
from .parrots_wrapper import TORCH_VERSION
from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
scandir, symlink)
from .version_utils import digit_version, get_git_hash
__all__ = [
'is_str', 'iter_cast', 'list_cast', 'tuple_cast', 'is_seq_of',
'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
'check_prerequisites', 'requires_package', 'requires_executable',
'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist', 'symlink',
'scandir', 'deprecated_api_warning', 'import_modules_from_strings',
'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
'is_method_overridden', 'has_method', 'mmcv_full_available',
'digit_version', 'get_git_hash', 'TORCH_VERSION', 'load_url',
'find_latest_checkpoint'
]
|
from typing import Iterable, Dict, Sequence
import math
import numpy as np
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
MAX_ES_RETURNED_DOCS = 10000
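    # 10000 mirrors Elasticsearch's default ``index.max_result_window``; the tie
    # to that default is an assumption based on the value. _get_docs_by_ids
    # chunks mget requests so no single request exceeds this many documents.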
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {col: doc.tags.get(col) for col, _ in self._config.columns}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
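    # A sketch of the bulk action built above (values illustrative):
    # {'_op_type': 'index', '_id': '<doc.id>', '_index': '<index_name>',
    #  'embedding': <mapped embedding>, 'blob': '<base64-serialized Document>',
    #  'text': '<doc.text, if set>', ...extra tag columns}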
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
# Handle if doc len is more than MAX_ES_RETURNED_DOCS
nrof_arr_split = math.ceil(len(ids) / self.MAX_ES_RETURNED_DOCS)
split_ids = np.array_split(ids, nrof_arr_split)
for split in split_ids:
es_docs = self._client.mget(index=self._config.index_name, ids=split)[
'docs'
]
docs = [
Document.from_base64(doc['_source']['blob'])
for doc in es_docs
if doc['found'] is True
]
docs_id_not_found = [doc['_id'] for doc in es_docs if doc['found'] is False]
accumulated_docs.extend(docs)
accumulated_docs_id_not_found.extend(docs_id_not_found)
if len(accumulated_docs_id_not_found) > 0:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
        :param ids: the ids used for indexing
        :param docs: the documents to store under the given ids
        :param mismatch_ids: map of ids that mismatch the documents' ids (unused in this implementation)
        """
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
from typing import Iterable, Dict
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
from docarray import Document
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayElastic``"""
def _document_to_elastic(self, doc: 'Document') -> Dict:
extra_columns = {col: doc.tags.get(col) for col, _ in self._config.columns}
request = {
'_op_type': 'index',
'_id': doc.id,
'_index': self._config.index_name,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if self._config.tag_indices:
for index in self._config.tag_indices:
request[index] = doc.tags.get(index)
if doc.text:
request['text'] = doc.text
return request
def _getitem(self, doc_id: str) -> 'Document':
"""Helper method for getting item with elastic as storage
:param doc_id: id of the document
:raises KeyError: raise error when elastic id does not exist in storage
:return: Document
"""
try:
result = self._client.get(index=self._config.index_name, id=doc_id)
doc = Document.from_base64(result['_source']['blob'])
return doc
except Exception as ex:
raise KeyError(doc_id) from ex
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from elastic
"""
return self._getitem(_id)
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
if _id != value.id:
self._del_doc_by_id(_id)
request = [self._document_to_elastic(value)]
self._send_requests(request)
self._refresh(self._config.index_name)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
        :param ids: the ids used for indexing
        :param docs: the documents to store under the given ids
        :param mismatch_ids: map of ids that mismatch the documents' ids (unused in this implementation)
        """
for _id, doc in zip(ids, docs):
self._set_doc_by_id(_id, doc)
self._refresh(self._config.index_name)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(index=self._config.index_name, id=_id)
self._refresh(self._config.index_name)
def _clear_storage(self):
"""Concrete implementation of base class' ``_clear_storage``"""
self._client.indices.delete(index=self._config.index_name)
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_cosmos import CosmosTransformer3DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
|
from ...utils import is_torch_available
if is_torch_available():
from .auraflow_transformer_2d import AuraFlowTransformer2DModel
from .cogvideox_transformer_3d import CogVideoXTransformer3DModel
from .consisid_transformer_3d import ConsisIDTransformer3DModel
from .dit_transformer_2d import DiTTransformer2DModel
from .dual_transformer_2d import DualTransformer2DModel
from .hunyuan_transformer_2d import HunyuanDiT2DModel
from .latte_transformer_3d import LatteTransformer3DModel
from .lumina_nextdit2d import LuminaNextDiT2DModel
from .pixart_transformer_2d import PixArtTransformer2DModel
from .prior_transformer import PriorTransformer
from .sana_transformer import SanaTransformer2DModel
from .stable_audio_transformer import StableAudioDiTModel
from .t5_film_transformer import T5FilmDecoder
from .transformer_2d import Transformer2DModel
from .transformer_allegro import AllegroTransformer3DModel
from .transformer_cogview3plus import CogView3PlusTransformer2DModel
from .transformer_cogview4 import CogView4Transformer2DModel
from .transformer_easyanimate import EasyAnimateTransformer3DModel
from .transformer_flux import FluxTransformer2DModel
from .transformer_hidream_image import HiDreamImageTransformer2DModel
from .transformer_hunyuan_video import HunyuanVideoTransformer3DModel
from .transformer_hunyuan_video_framepack import HunyuanVideoFramepackTransformer3DModel
from .transformer_ltx import LTXVideoTransformer3DModel
from .transformer_lumina2 import Lumina2Transformer2DModel
from .transformer_mochi import MochiTransformer3DModel
from .transformer_omnigen import OmniGenTransformer2DModel
from .transformer_sd3 import SD3Transformer2DModel
from .transformer_temporal import TransformerTemporalModel
from .transformer_wan import WanTransformer3DModel
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_execution_status_batch,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
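# The class below exposes the async data-layer functions over the service RPC
# interface; exposed_run_and_wait (from backend.util.service) wraps a coroutine
# so remote callers invoke it synchronously and receive the awaited result.
# This description is inferred from the names and usage here, not from the
# wrapper's implementation.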
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_execution_status_batch = exposed_run_and_wait(update_execution_status_batch)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
from backend.data.credit import get_user_credit_model
from backend.data.execution import (
ExecutionResult,
NodeExecutionEntry,
RedisExecutionEventBus,
create_graph_execution,
get_execution_results,
get_incomplete_executions,
get_latest_execution,
update_execution_status,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, expose, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
async def _spend_credits(entry: NodeExecutionEntry) -> int:
return await _user_credit_model.spend_credits(entry, 0, 0)
class DatabaseManager(AppService):
def __init__(self):
super().__init__()
self.use_db = True
self.use_redis = True
self.event_queue = RedisExecutionEventBus()
@classmethod
def get_port(cls) -> int:
return config.database_api_port
@expose
def send_execution_update(self, execution_result: ExecutionResult):
self.event_queue.publish(execution_result)
# Executions
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_execution_results = exposed_run_and_wait(get_execution_results)
get_incomplete_executions = exposed_run_and_wait(get_incomplete_executions)
get_latest_execution = exposed_run_and_wait(get_latest_execution)
update_execution_status = exposed_run_and_wait(update_execution_status)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
"""Rayyan review reader."""
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RayyanReader(BaseReader):
"""
Rayyan reader. Reads articles from a Rayyan review.
Args:
credentials_path (str): Rayyan credentials path.
rayyan_url (str, optional): Rayyan URL. Defaults to https://rayyan.ai.
Set to an alternative URL if you are using a non-production Rayyan instance.
"""
def __init__(
self, credentials_path: str, rayyan_url: str = "https://rayyan.ai"
) -> None:
"""Initialize Rayyan reader."""
from rayyan import Rayyan
from rayyan.user import User
logging.debug("Initializing Rayyan reader...")
self.rayyan = Rayyan(credentials_path, url=rayyan_url)
user = User(self.rayyan).get_info()
logging.info(f"Signed in successfully to Rayyan as: {user['displayName']}!")
def load_data(self, review_id: str, filters: dict = {}) -> List[Document]:
"""
Load articles from a review.
Args:
            review_id (str): Rayyan review ID.
            filters (dict, optional): Filters to apply to the review. Defaults to an
                empty dict. Passed to the Rayyan review results method as is.
Returns:
List[Document]: List of documents.
"""
from tenacity import (
retry,
stop_after_attempt,
stop_after_delay,
stop_all,
wait_random_exponential,
)
from tqdm import tqdm
from rayyan.review import Review
rayyan_review = Review(self.rayyan)
my_review = rayyan_review.get(review_id)
logging.info(
f"Working on review: '{my_review['title']}' with {my_review['total_articles']} total articles."
)
result_params = {"start": 0, "length": 100}
result_params.update(filters)
@retry(
wait=wait_random_exponential(min=1, max=10),
stop=stop_all(stop_after_attempt(3), stop_after_delay(30)),
)
def fetch_results_with_retry():
logging.debug("Fetch parameters: %s", result_params)
return rayyan_review.results(review_id, result_params)
articles = []
logging.info("Fetching articles from Rayyan...")
total = my_review["total_articles"]
with tqdm(total=total) as pbar:
while len(articles) < total:
# retrieve articles in batches
review_results = fetch_results_with_retry()
fetched_articles = review_results["data"]
articles.extend(fetched_articles)
# update total in case filters are applied
if total != review_results["recordsFiltered"]:
total = review_results["recordsFiltered"]
pbar.total = total
result_params["start"] += len(fetched_articles)
pbar.update(len(fetched_articles))
results = []
for article in articles:
# iterate over all abstracts
abstracts = ""
if article["abstracts"] is not None:
abstracts_arr = [
abstract["content"] for abstract in article["abstracts"]
]
if len(abstracts_arr) > 0:
                    # join the abstracts into a single string, truncated to 1024 characters
abstracts = "\n".join(abstracts_arr)[0:1024].strip()
title = article["title"]
if title is not None:
title = title.strip()
            body = f"{title}\n{abstracts}" if title is not None else abstracts
if body.strip() == "":
continue
extra_info = {"id": article["id"], "title": title}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
|
"""Rayyan review reader."""
import logging
from typing import List
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class RayyanReader(BaseReader):
"""Rayyan reader. Reads articles from a Rayyan review.
Args:
credentials_path (str): Rayyan credentials path.
rayyan_url (str, optional): Rayyan URL. Defaults to https://rayyan.ai.
Set to an alternative URL if you are using a non-production Rayyan instance.
"""
def __init__(
self, credentials_path: str, rayyan_url: str = "https://rayyan.ai"
) -> None:
"""Initialize Rayyan reader."""
from rayyan import Rayyan
from rayyan.user import User
logging.debug("Initializing Rayyan reader...")
self.rayyan = Rayyan(credentials_path, url=rayyan_url)
user = User(self.rayyan).get_info()
logging.info(f"Signed in successfully to Rayyan as: {user['displayName']}!")
def load_data(self, review_id: str, filters: dict = {}) -> List[Document]:
"""Load articles from a review.
Args:
            review_id (str): Rayyan review ID.
            filters (dict, optional): Filters to apply to the review. Defaults to an
                empty dict. Passed to the Rayyan review results method as is.
Returns:
List[Document]: List of documents.
"""
from tenacity import (
retry,
stop_after_attempt,
stop_after_delay,
stop_all,
wait_random_exponential,
)
from tqdm import tqdm
from rayyan.review import Review
rayyan_review = Review(self.rayyan)
my_review = rayyan_review.get(review_id)
logging.info(
f"Working on review: '{my_review['title']}' with {my_review['total_articles']} total articles."
)
result_params = {"start": 0, "length": 100}
result_params.update(filters)
@retry(
wait=wait_random_exponential(min=1, max=10),
stop=stop_all(stop_after_attempt(3), stop_after_delay(30)),
)
def fetch_results_with_retry():
logging.debug("Fetch parameters: %s", result_params)
return rayyan_review.results(review_id, result_params)
articles = []
logging.info("Fetching articles from Rayyan...")
total = my_review["total_articles"]
with tqdm(total=total) as pbar:
while len(articles) < total:
# retrieve articles in batches
review_results = fetch_results_with_retry()
fetched_articles = review_results["data"]
articles.extend(fetched_articles)
# update total in case filters are applied
if total != review_results["recordsFiltered"]:
total = review_results["recordsFiltered"]
pbar.total = total
result_params["start"] += len(fetched_articles)
pbar.update(len(fetched_articles))
results = []
for article in articles:
# iterate over all abstracts
abstracts = ""
if article["abstracts"] is not None:
abstracts_arr = [
abstract["content"] for abstract in article["abstracts"]
]
if len(abstracts_arr) > 0:
                    # join the abstracts into a single string, truncated to 1024 characters
abstracts = "\n".join(abstracts_arr)[0:1024].strip()
title = article["title"]
if title is not None:
title = title.strip()
            body = f"{title}\n{abstracts}" if title is not None else abstracts
if body.strip() == "":
continue
extra_info = {"id": article["id"], "title": title}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
|
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that modules like attention processors are listed in the documentation file.
```bash
python utils/check_support_list.py
```
It has no auto-fix mode.
"""
import os
import re
# All paths are set with the intent that you run this script from the root of the repo
REPO_PATH = "."
def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"):
"""
Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class.
Returns a list of documented class names (just the class name portion).
"""
with open(os.path.join(REPO_PATH, doc_path), "r") as f:
doctext = f.read()
matches = re.findall(autodoc_regex, doctext)
return [match.split(".")[-1] for match in matches]
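# For example, a doc line "[[autodoc]] models.attention_processor.AttnProcessor"
# matches the default regex and is reduced to "AttnProcessor".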
def read_source_classes(src_path, class_regex, exclude_conditions=None):
"""
Reads class names from a source file using a regex that captures class definitions.
Optionally exclude classes based on a list of conditions (functions that take class name and return bool).
"""
if exclude_conditions is None:
exclude_conditions = []
with open(os.path.join(REPO_PATH, src_path), "r") as f:
doctext = f.read()
classes = re.findall(class_regex, doctext)
# Filter out classes that meet any of the exclude conditions
filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)]
return filtered_classes
def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None):
"""
Generic function to check if all classes defined in `src_path` are documented in `doc_path`.
    Returns a sorted list of undocumented class names.
"""
documented = set(read_documented_classes(doc_path, doc_regex))
source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions))
    # Find, in a deterministic (sorted) order, which source classes are undocumented.
undocumented = sorted(source_classes - documented)
return undocumented
if __name__ == "__main__":
# Define the checks we need to perform
checks = {
"Attention Processors": {
"doc_path": "docs/source/en/api/attnprocessor.md",
"src_path": "src/diffusers/models/attention_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
"exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"],
},
"Image Processors": {
"doc_path": "docs/source/en/api/image_processor.md",
"src_path": "src/diffusers/image_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
},
"Activations": {
"doc_path": "docs/source/en/api/activations.md",
"src_path": "src/diffusers/models/activations.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
},
"Normalizations": {
"doc_path": "docs/source/en/api/normalization.md",
"src_path": "src/diffusers/models/normalization.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
"exclude_conditions": [
# Exclude LayerNorm as it's an intentional exception
lambda c: c == "LayerNorm"
],
},
"LoRA Mixins": {
"doc_path": "docs/source/en/api/loaders/lora.md",
"src_path": "src/diffusers/loaders/lora_pipeline.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]",
},
}
missing_items = {}
for category, params in checks.items():
undocumented = check_documentation(
doc_path=params["doc_path"],
src_path=params["src_path"],
doc_regex=params["doc_regex"],
src_regex=params["src_regex"],
exclude_conditions=params.get("exclude_conditions"),
)
if undocumented:
missing_items[category] = undocumented
# If we have any missing items, raise a single combined error
if missing_items:
error_msg = ["Some classes are not documented properly:\n"]
for category, classes in missing_items.items():
error_msg.append(f"- {category}: {', '.join(sorted(classes))}")
raise ValueError("\n".join(error_msg))
|
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
"""
Utility that checks that modules like attention processors are listed in the documentation file.
```bash
python utils/check_support_list.py
```
It has no auto-fix mode.
"""
import os
import re
# All paths are set with the intent that you run this script from the root of the repo
REPO_PATH = "."
def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"):
"""
Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class.
Returns a list of documented class names (just the class name portion).
"""
with open(os.path.join(REPO_PATH, doc_path), "r") as f:
doctext = f.read()
matches = re.findall(autodoc_regex, doctext)
return [match.split(".")[-1] for match in matches]
def read_source_classes(src_path, class_regex, exclude_conditions=None):
"""
Reads class names from a source file using a regex that captures class definitions.
Optionally exclude classes based on a list of conditions (functions that take class name and return bool).
"""
if exclude_conditions is None:
exclude_conditions = []
with open(os.path.join(REPO_PATH, src_path), "r") as f:
doctext = f.read()
classes = re.findall(class_regex, doctext)
# Filter out classes that meet any of the exclude conditions
filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)]
return filtered_classes
def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None):
"""
Generic function to check if all classes defined in `src_path` are documented in `doc_path`.
    Returns a sorted list of undocumented class names.
"""
documented = set(read_documented_classes(doc_path, doc_regex))
source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions))
    # Find, in a deterministic (sorted) order, which source classes are undocumented.
undocumented = sorted(source_classes - documented)
return undocumented
if __name__ == "__main__":
# Define the checks we need to perform
checks = {
"Attention Processors": {
"doc_path": "docs/source/en/api/attnprocessor.md",
"src_path": "src/diffusers/models/attention_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
"exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"],
},
"Image Processors": {
"doc_path": "docs/source/en/api/image_processor.md",
"src_path": "src/diffusers/image_processor.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]",
},
"Activations": {
"doc_path": "docs/source/en/api/activations.md",
"src_path": "src/diffusers/models/activations.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
},
"Normalizations": {
"doc_path": "docs/source/en/api/normalization.md",
"src_path": "src/diffusers/models/normalization.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):",
"exclude_conditions": [
# Exclude LayerNorm as it's an intentional exception
lambda c: c == "LayerNorm"
],
},
"LoRA Mixins": {
"doc_path": "docs/source/en/api/loaders/lora.md",
"src_path": "src/diffusers/loaders/lora_pipeline.py",
"doc_regex": r"\[\[autodoc\]\]\s([^\n]+)",
"src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]",
},
}
missing_items = {}
for category, params in checks.items():
undocumented = check_documentation(
doc_path=params["doc_path"],
src_path=params["src_path"],
doc_regex=params["doc_regex"],
src_regex=params["src_regex"],
exclude_conditions=params.get("exclude_conditions"),
)
if undocumented:
missing_items[category] = undocumented
# If we have any missing items, raise a single combined error
if missing_items:
error_msg = ["Some classes are not documented properly:\n"]
for category, classes in missing_items.items():
error_msg.append(f"- {category}: {', '.join(sorted(classes))}")
raise ValueError("\n".join(error_msg))
|
"""
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import SentenceTransformer, util
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../examples/applications/image-search/two_dogs_in_snow.jpg",
)
img_emb = model.encode(Image.open(image_filepath))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)[0]
assert abs(cos_scores[0] - 0.3069) < 0.01
assert abs(cos_scores[1] - 0.1010) < 0.01
assert abs(cos_scores[2] - 0.1086) < 0.01
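    # The caption matching the image ("Two dogs in the snow") scores highest,
    # showing that CLIP image and text embeddings share one vector space.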
|
"""
Compute image embeddings
"""
import os
from PIL import Image
from sentence_transformers import util, SentenceTransformer
def test_simple_encode(clip_vit_b_32_model: SentenceTransformer) -> None:
model = clip_vit_b_32_model
# Encode an image:
image_filepath = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../examples/applications/image-search/two_dogs_in_snow.jpg",
)
img_emb = model.encode(Image.open(image_filepath))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)[0]
assert abs(cos_scores[0] - 0.3069) < 0.01
assert abs(cos_scores[1] - 0.1010) < 0.01
assert abs(cos_scores[2] - 0.1086) < 0.01
|
import sys
import pytest
from hypothesis import given, settings, strategies
import xgboost as xgb
from xgboost import testing as tm
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
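# make the CPU data-iterator helpers importable for the GPU tests below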
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
def test_concat_pages() -> None:
it = tm.IteratorForTest(*tm.make_batches(64, 16, 4, use_cupy=True), cache=None)
Xy = xgb.ExtMemQuantileDMatrix(it)
with pytest.raises(ValueError, match="can not be used with concatenated pages"):
booster = xgb.train(
{
"device": "cuda",
"subsample": 0.5,
"sampling_method": "gradient_based",
"extmem_concat_pages": True,
"objective": "reg:absoluteerror",
},
Xy,
)
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
import sys
import pytest
from hypothesis import given, settings, strategies
from xgboost.testing import no_cupy
from xgboost.testing.updater import check_extmem_qdm, check_quantile_loss_extmem
sys.path.append("tests/python")
from test_data_iterator import run_data_iterator
from test_data_iterator import test_single_batch as cpu_single_batch
def test_gpu_single_batch() -> None:
cpu_single_batch("hist", "cuda")
@pytest.mark.skipif(**no_cupy())
@given(
strategies.integers(0, 1024),
strategies.integers(1, 7),
strategies.integers(0, 8),
strategies.booleans(),
strategies.booleans(),
strategies.booleans(),
)
@settings(deadline=None, max_examples=16, print_blob=True)
def test_gpu_data_iterator(
n_samples_per_batch: int,
n_features: int,
n_batches: int,
subsample: bool,
use_cupy: bool,
on_host: bool,
) -> None:
run_data_iterator(
n_samples_per_batch,
n_features,
n_batches,
"hist",
subsample=subsample,
device="cuda",
use_cupy=use_cupy,
on_host=on_host,
)
def test_cpu_data_iterator() -> None:
"""Make sure CPU algorithm can handle GPU inputs"""
run_data_iterator(
1024,
2,
3,
"approx",
device="cuda",
subsample=False,
use_cupy=True,
on_host=False,
)
@given(
strategies.integers(1, 2048),
strategies.integers(1, 8),
strategies.integers(1, 4),
strategies.booleans(),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_extmem_qdm(
n_samples_per_batch: int, n_features: int, n_batches: int, on_host: bool
) -> None:
check_extmem_qdm(n_samples_per_batch, n_features, n_batches, "cuda", on_host)
@given(
strategies.integers(1, 64),
strategies.integers(1, 8),
strategies.integers(1, 4),
)
@settings(deadline=None, max_examples=10, print_blob=True)
def test_quantile_objective(
n_samples_per_batch: int, n_features: int, n_batches: int
) -> None:
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"hist",
"cuda",
)
check_quantile_loss_extmem(
n_samples_per_batch,
n_features,
n_batches,
"approx",
"cuda",
)
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(BaseModel):
smtp_server: str = SchemaField(
default="smtp.gmail.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
model_config = ConfigDict(title="Email Credentials")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="recipient@example.com"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
disabled=True,
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "recipient@example.com",
"subject": "Test Email",
"body": "This is a test email.",
"creds": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
"smtp_username": "your-email@gmail.com",
"smtp_password": "your-gmail-password",
},
},
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
smtp_server = creds.smtp_server
smtp_port = creds.smtp_port
smtp_username = creds.smtp_username.get_secret_value()
smtp_password = creds.smtp_password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
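            # Upgrade the plain connection to TLS before authenticating.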
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "status", self.send_email(
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
|
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pydantic import BaseModel, ConfigDict
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import BlockSecret, SchemaField, SecretField
class EmailCredentials(BaseModel):
smtp_server: str = SchemaField(
default="smtp.gmail.com", description="SMTP server address"
)
smtp_port: int = SchemaField(default=25, description="SMTP port number")
smtp_username: BlockSecret = SecretField(key="smtp_username")
smtp_password: BlockSecret = SecretField(key="smtp_password")
model_config = ConfigDict(title="Email Credentials")
class SendEmailBlock(Block):
class Input(BlockSchema):
to_email: str = SchemaField(
description="Recipient email address", placeholder="recipient@example.com"
)
subject: str = SchemaField(
description="Subject of the email", placeholder="Enter the email subject"
)
body: str = SchemaField(
description="Body of the email", placeholder="Enter the email body"
)
creds: EmailCredentials = SchemaField(
description="SMTP credentials",
default=EmailCredentials(),
)
class Output(BlockSchema):
status: str = SchemaField(description="Status of the email sending operation")
error: str = SchemaField(
description="Error message if the email sending failed"
)
def __init__(self):
super().__init__(
id="4335878a-394e-4e67-adf2-919877ff49ae",
description="This block sends an email using the provided SMTP credentials.",
categories={BlockCategory.OUTPUT},
input_schema=SendEmailBlock.Input,
output_schema=SendEmailBlock.Output,
test_input={
"to_email": "recipient@example.com",
"subject": "Test Email",
"body": "This is a test email.",
"creds": {
"smtp_server": "smtp.gmail.com",
"smtp_port": 25,
"smtp_username": "your-email@gmail.com",
"smtp_password": "your-gmail-password",
},
},
test_output=[("status", "Email sent successfully")],
test_mock={"send_email": lambda *args, **kwargs: "Email sent successfully"},
)
@staticmethod
def send_email(
creds: EmailCredentials, to_email: str, subject: str, body: str
) -> str:
smtp_server = creds.smtp_server
smtp_port = creds.smtp_port
smtp_username = creds.smtp_username.get_secret_value()
smtp_password = creds.smtp_password.get_secret_value()
msg = MIMEMultipart()
msg["From"] = smtp_username
msg["To"] = to_email
msg["Subject"] = subject
msg.attach(MIMEText(body, "plain"))
with smtplib.SMTP(smtp_server, smtp_port) as server:
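            # Upgrade the plain connection to TLS before authenticating.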
server.starttls()
server.login(smtp_username, smtp_password)
server.sendmail(smtp_username, to_email, msg.as_string())
return "Email sent successfully"
def run(self, input_data: Input, **kwargs) -> BlockOutput:
yield "status", self.send_email(
input_data.creds,
input_data.to_email,
input_data.subject,
input_data.body,
)
|
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.chains.openai_functions.base import (
create_openai_fn_chain,
create_structured_output_chain,
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
create_citation_fuzzy_match_runnable,
)
from langchain.chains.openai_functions.extraction import (
create_extraction_chain,
create_extraction_chain_pydantic,
)
from langchain.chains.openai_functions.qa_with_structure import (
create_qa_with_sources_chain,
create_qa_with_structure_chain,
)
from langchain.chains.openai_functions.tagging import (
create_tagging_chain,
create_tagging_chain_pydantic,
)
from langchain.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
get_openai_output_parser,
)
__all__ = [
"convert_to_openai_function",
"create_citation_fuzzy_match_chain",
"create_citation_fuzzy_match_runnable",
"create_extraction_chain",
"create_extraction_chain_pydantic",
"create_openai_fn_chain",
"create_openai_fn_runnable", # backwards compatibility
"create_qa_with_sources_chain",
"create_qa_with_structure_chain",
"create_structured_output_chain",
"create_structured_output_runnable", # backwards compatibility
"create_tagging_chain",
"create_tagging_chain_pydantic",
"get_openai_output_parser", # backwards compatibility
]
|
from langchain_core.utils.function_calling import convert_to_openai_function
from langchain.chains.openai_functions.base import (
create_openai_fn_chain,
create_structured_output_chain,
)
from langchain.chains.openai_functions.citation_fuzzy_match import (
create_citation_fuzzy_match_chain,
create_citation_fuzzy_match_runnable,
)
from langchain.chains.openai_functions.extraction import (
create_extraction_chain,
create_extraction_chain_pydantic,
)
from langchain.chains.openai_functions.qa_with_structure import (
create_qa_with_sources_chain,
create_qa_with_structure_chain,
)
from langchain.chains.openai_functions.tagging import (
create_tagging_chain,
create_tagging_chain_pydantic,
)
from langchain.chains.structured_output.base import (
create_openai_fn_runnable,
create_structured_output_runnable,
get_openai_output_parser,
)
__all__ = [
"convert_to_openai_function",
"create_tagging_chain",
"create_tagging_chain_pydantic",
"create_extraction_chain_pydantic",
"create_extraction_chain",
"create_citation_fuzzy_match_chain",
"create_citation_fuzzy_match_runnable",
"create_qa_with_structure_chain",
"create_qa_with_sources_chain",
"create_structured_output_chain",
"create_openai_fn_chain",
"create_structured_output_runnable", # backwards compatibility
"create_openai_fn_runnable", # backwards compatibility
"get_openai_output_parser", # backwards compatibility
]
|
from llama_index.vector_stores.couchbase.base import (
CouchbaseVectorStore,
CouchbaseSearchVectorStore,
)
__all__ = ["CouchbaseVectorStore", "CouchbaseSearchVectorStore"]
|
from llama_index.vector_stores.couchbase.base import (
    CouchbaseVectorStore,
    CouchbaseSearchVectorStore,
)
__all__ = ["CouchbaseVectorStore", "CouchbaseSearchVectorStore"]
|
import numpy as np
import pytest
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
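        # The numpy backend does not support training, so the training
        # check is skipped there.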
        run_training_check = backend.backend() != "numpy"
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
run_training_check=run_training_check,
)
@pytest.mark.skipif(
backend.backend() == "numpy", reason="masking not working in numpy"
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
|
import numpy as np
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class CanaryLayer(layers.Layer):
def __init__(self):
super().__init__()
self.training = None
self.received_mask = False
def call(self, x, training=False, mask=None):
self.training = training
if mask is not None:
self.received_mask = True
return x
def compute_mask(self, x, mask=None):
return x
def compute_output_shape(self, input_shape):
return input_shape
class PipelineTest(testing.TestCase):
def test_basics(self):
self.run_layer_test(
layers.Pipeline,
init_kwargs={
"layers": [layers.AutoContrast(), layers.RandomBrightness(0.1)],
},
input_shape=(8, 3, 4, 3),
supports_masking=False,
expected_output_shape=(8, 3, 4, 3),
run_mixed_precision_check=False,
)
def test_correctness(self):
pipeline = layers.Pipeline([CanaryLayer(), CanaryLayer()])
x = np.array([0])
mask = np.array([0])
pipeline(x, training=True, mask=mask)
self.assertTrue(pipeline.layers[0].training)
self.assertTrue(pipeline.layers[0].received_mask)
self.assertTrue(pipeline.layers[1].training)
self.assertTrue(pipeline.layers[1].received_mask)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (2, 10, 12, 3)
output_shape = (2, 8, 9, 3)
else:
input_shape = (2, 3, 10, 12)
output_shape = (2, 3, 8, 9)
layer = layers.Pipeline(
[
layers.AutoContrast(),
layers.CenterCrop(8, 9),
]
)
input_data = np.random.random(input_shape)
ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
for output in ds.take(1):
output = output.numpy()
self.assertEqual(tuple(output.shape), output_shape)
|
import pytest  # type: ignore[import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest  # type: ignore[import-not-found]
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
'jina-hubble-sdk>=0.10.0',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'jina-hubble-sdk>=0.10.0',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
import sys
from os import path
from setuptools import find_packages
from setuptools import setup
if sys.version_info < (3, 7, 0):
raise OSError(f'DocArray requires Python >=3.7, but yours is {sys.version}')
try:
pkg_name = 'docarray'
libinfo_py = path.join(pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r', encoding='utf8').readlines()
version_line = [l.strip() for l in libinfo_content if l.startswith('__version__')][
0
]
exec(version_line) # gives __version__
except FileNotFoundError:
__version__ = '0.0.0'
try:
with open('README.md', encoding='utf8') as fp:
_long_description = fp.read()
except FileNotFoundError:
_long_description = ''
setup(
name=pkg_name,
packages=find_packages(),
version=__version__,
include_package_data=True,
description='The data structure for unstructured data',
author='Jina AI',
author_email='hello@jina.ai',
license='Apache 2.0',
url='https://github.com/jina-ai/docarray',
download_url='https://github.com/jina-ai/docarray/tags',
long_description=_long_description,
long_description_content_type='text/markdown',
zip_safe=False,
setup_requires=['setuptools>=18.0', 'wheel'],
install_requires=['numpy', 'rich>=12.0.0'],
extras_require={
# req usage, please see https://docarray.jina.ai/#install
'common': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'fastapi',
'uvicorn',
],
'full': [
'protobuf>=3.13.0,<=3.20.1',
'lz4',
'requests',
'matplotlib',
'Pillow',
'trimesh',
'scipy',
'av',
'fastapi',
'uvicorn',
'strawberry-graphql',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'qdrant-client~=0.7.3',
'elasticsearch>=8.2.0',
],
'qdrant': [
'qdrant-client~=0.7.3',
],
'annlite': [
'annlite>=0.3.2',
],
'weaviate': [
'weaviate-client~=3.3.0',
],
'elasticsearch': [
'elasticsearch>=8.2.0',
],
'test': [
'pytest',
'pytest-timeout',
'pytest-mock',
'pytest-cov',
'pytest-repeat',
'pytest-reraise',
'mock',
'pytest-custom_exit_code',
'black==22.3.0',
'tensorflow==2.7.0',
'paddlepaddle==2.2.0',
'torch==1.9.0',
'torchvision==0.10.0',
'datasets',
'onnx',
'onnxruntime',
'jupyterlab',
'transformers>=4.16.2',
'weaviate-client~=3.3.0',
'annlite>=0.3.2',
'elasticsearch>=8.2.0',
'jina',
],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Unix Shell',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Multimedia :: Video',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
project_urls={
'Documentation': 'https://docarray.jina.ai',
'Source': 'https://github.com/jina-ai/docarray/',
'Tracker': 'https://github.com/jina-ai/docarray/issues',
},
keywords='docarray deep-learning data-structures cross-modal multi-modal unstructured-data nested-data neural-search',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmengine.config import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    parser = argparse.ArgumentParser(description='Get the FLOPs of a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
ori_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != ori_shape:
        print(f'{split_line}\nUsing the size divisor, the input shape is '
              f'padded from {ori_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
    parser = argparse.ArgumentParser(description='Get the FLOPs of a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
        help='override some settings in the used config; the key-value pair '
        'in xxx=yyy format will be merged into the config file. If the value '
        'to be overwritten is a list, it should be like key="[a,b]" or '
        'key=a,b. It also allows nested list/tuple values, e.g. '
        'key="[(a,b),(c,d)]". Note that the quotation marks are necessary '
        'and that no white space is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
ori_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != ori_shape:
        print(f'{split_line}\nUsing the size divisor, the input shape is '
              f'padded from {ori_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_dataset_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, filename, revision):
url = hf_hub_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = ['DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder']
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
__all__ = ['DropBlock']
|
def split_package(package: str) -> tuple[str, str]:
"""Split a package name into the containing package and the final name."""
parts = package.split(".")
return ".".join(parts[:-1]), parts[-1]
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
"""Dump the migration pairs as a Grit file."""
remapped = ",\n".join(
[
f"""
[
`{split_package(from_module)[0]}`,
`{split_package(from_module)[1]}`,
`{split_package(to_module)[0]}`,
`{split_package(to_module)[1]}`
]
"""
for from_module, to_module in migration_pairs
],
)
pattern_name = f"langchain_migrate_{name}"
return f"""
language python
// This migration is generated automatically - do not manually edit this file
pattern {pattern_name}() {{
find_replace_imports(list=[
{remapped}
])
}}
// Add this for invoking directly
{pattern_name}()
"""
|
def split_package(package: str) -> tuple[str, str]:
"""Split a package name into the containing package and the final name."""
parts = package.split(".")
return ".".join(parts[:-1]), parts[-1]
def dump_migrations_as_grit(name: str, migration_pairs: list[tuple[str, str]]) -> str:
"""Dump the migration pairs as a Grit file."""
remapped = ",\n".join(
[
f"""
[
`{split_package(from_module)[0]}`,
`{split_package(from_module)[1]}`,
`{split_package(to_module)[0]}`,
`{split_package(to_module)[1]}`
]
"""
for from_module, to_module in migration_pairs
]
)
pattern_name = f"langchain_migrate_{name}"
return f"""
language python
// This migration is generated automatically - do not manually edit this file
pattern {pattern_name}() {{
find_replace_imports(list=[
{remapped}
])
}}
// Add this for invoking directly
{pattern_name}()
"""
|
from torchaudio._internal.module_utils import dropping_support
from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
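# ``dropping_support`` marks APIs slated for removal; calling the wrapped
# function emits a deprecation warning.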
forced_align = dropping_support(_forced_align)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
frechet_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss as _rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
rnnt_loss = dropping_support(_rnnt_loss)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
"frechet_distance",
]
|
from torchaudio._internal.module_utils import dropping_support
from ._alignment import forced_align as _forced_align, merge_tokens, TokenSpan
from .filtering import (
allpass_biquad,
band_biquad,
bandpass_biquad,
bandreject_biquad,
bass_biquad,
biquad,
contrast,
dcshift,
deemph_biquad,
dither,
equalizer_biquad,
filtfilt,
flanger,
gain,
highpass_biquad,
lfilter,
lowpass_biquad,
overdrive,
phaser,
riaa_biquad,
treble_biquad,
vad,
)
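# ``dropping_support`` marks APIs slated for removal; calling the wrapped
# function emits a deprecation warning.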
forced_align = dropping_support(_forced_align)
from .functional import (
add_noise,
amplitude_to_DB,
apply_beamforming,
apply_codec,
compute_deltas,
convolve,
create_dct,
DB_to_amplitude,
deemphasis,
detect_pitch_frequency,
edit_distance,
fftconvolve,
frechet_distance,
griffinlim,
inverse_spectrogram,
linear_fbanks,
loudness,
mask_along_axis,
mask_along_axis_iid,
melscale_fbanks,
mu_law_decoding,
mu_law_encoding,
mvdr_weights_rtf,
mvdr_weights_souden,
phase_vocoder,
pitch_shift,
preemphasis,
psd,
resample,
rnnt_loss as _rnnt_loss,
rtf_evd,
rtf_power,
sliding_window_cmn,
spectral_centroid,
spectrogram,
speed,
)
rnnt_loss = dropping_support(_rnnt_loss)
__all__ = [
"amplitude_to_DB",
"compute_deltas",
"create_dct",
"melscale_fbanks",
"linear_fbanks",
"DB_to_amplitude",
"loudness",
"detect_pitch_frequency",
"griffinlim",
"mask_along_axis",
"mask_along_axis_iid",
"mu_law_encoding",
"mu_law_decoding",
"phase_vocoder",
"sliding_window_cmn",
"spectrogram",
"inverse_spectrogram",
"spectral_centroid",
"allpass_biquad",
"band_biquad",
"bandpass_biquad",
"bandreject_biquad",
"bass_biquad",
"biquad",
"contrast",
"dither",
"dcshift",
"deemph_biquad",
"equalizer_biquad",
"filtfilt",
"flanger",
"forced_align",
"merge_tokens",
"TokenSpan",
"gain",
"highpass_biquad",
"lfilter",
"lowpass_biquad",
"overdrive",
"phaser",
"riaa_biquad",
"treble_biquad",
"vad",
"apply_codec",
"resample",
"edit_distance",
"pitch_shift",
"rnnt_loss",
"psd",
"mvdr_weights_souden",
"mvdr_weights_rtf",
"rtf_evd",
"rtf_power",
"apply_beamforming",
"fftconvolve",
"convolve",
"add_noise",
"speed",
"preemphasis",
"deemphasis",
"frechet_distance",
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import ConcatDataset, MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dod import DODDataset
from .dsdl import DSDLDetDataset
from .flickr30k import Flickr30kDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mdetr_style_refcoco import MDETRStyleRefCocoDataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .odvg import ODVGDataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
CustomSampleSizeSampler, GroupMultiSourceSampler,
MultiSourceSampler, TrackAspectRatioBatchSampler,
TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset', 'ConcatDataset',
'ODVGDataset', 'MDETRStyleRefCocoDataset', 'DODDataset',
'CustomSampleSizeSampler', 'Flickr30kDataset'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .ade20k import (ADE20KInstanceDataset, ADE20KPanopticDataset,
ADE20KSegDataset)
from .base_det_dataset import BaseDetDataset
from .base_semseg_dataset import BaseSegDataset
from .base_video_dataset import BaseVideoDataset
from .cityscapes import CityscapesDataset
from .coco import CocoDataset
from .coco_caption import CocoCaptionDataset
from .coco_panoptic import CocoPanopticDataset
from .coco_semantic import CocoSegDataset
from .crowdhuman import CrowdHumanDataset
from .dataset_wrappers import ConcatDataset, MultiImageMixDataset
from .deepfashion import DeepFashionDataset
from .dsdl import DSDLDetDataset
from .isaid import iSAIDDataset
from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
from .mot_challenge_dataset import MOTChallengeDataset
from .objects365 import Objects365V1Dataset, Objects365V2Dataset
from .openimages import OpenImagesChallengeDataset, OpenImagesDataset
from .refcoco import RefCocoDataset
from .reid_dataset import ReIDDataset
from .samplers import (AspectRatioBatchSampler, ClassAwareSampler,
GroupMultiSourceSampler, MultiSourceSampler,
TrackAspectRatioBatchSampler, TrackImgSampler)
from .utils import get_loading_pipeline
from .v3det import V3DetDataset
from .voc import VOCDataset
from .wider_face import WIDERFaceDataset
from .xml_style import XMLDataset
from .youtube_vis_dataset import YouTubeVISDataset
__all__ = [
'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset',
'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset',
'WIDERFaceDataset', 'get_loading_pipeline', 'CocoPanopticDataset',
'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset',
'AspectRatioBatchSampler', 'ClassAwareSampler', 'MultiSourceSampler',
'GroupMultiSourceSampler', 'BaseDetDataset', 'CrowdHumanDataset',
'Objects365V1Dataset', 'Objects365V2Dataset', 'DSDLDetDataset',
'BaseVideoDataset', 'MOTChallengeDataset', 'TrackImgSampler',
'ReIDDataset', 'YouTubeVISDataset', 'TrackAspectRatioBatchSampler',
'ADE20KPanopticDataset', 'CocoCaptionDataset', 'RefCocoDataset',
'BaseSegDataset', 'ADE20KSegDataset', 'CocoSegDataset',
'ADE20KInstanceDataset', 'iSAIDDataset', 'V3DetDataset', 'ConcatDataset'
]
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store audio data and that can be loaded into an Audio tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[AudioNdArray, int]:
"""
Load the Audio from the [`AudioBytes`][docarray.typing.AudioBytes] into an
[`AudioNdArray`][docarray.typing.AudioNdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[AudioNdArray]
bytes_: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes_ = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes_.load()
        # Note that this is equivalent to:
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing the
audio bytes content, and an integer representing the frame rate.
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
segment = AudioSegment.from_file(io.BytesIO(self))
        # Convert the raw samples to a NumPy array
        samples = np.array(segment.get_array_of_samples())
        # Normalise by the sample width so that values lie between -1.0 and +1.0
        samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return parse_obj_as(AudioNdArray, samples_norm), segment.frame_rate
|
import io
from typing import TYPE_CHECKING, Any, Tuple, Type, TypeVar
import numpy as np
from pydantic import parse_obj_as
from pydantic.validators import bytes_validator
from docarray.typing.abstract_type import AbstractType
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.audio import AudioNdArray
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
from pydantic.fields import BaseConfig, ModelField
from docarray.proto import NodeProto
T = TypeVar('T', bound='AudioBytes')
@_register_proto(proto_type_name='audio_bytes')
class AudioBytes(bytes, AbstractType):
"""
    Bytes that store audio data and that can be loaded into an Audio tensor.
"""
@classmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
value = bytes_validator(value)
return cls(value)
@classmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
return parse_obj_as(cls, pb_msg)
def _to_node_protobuf(self: T) -> 'NodeProto':
from docarray.proto import NodeProto
return NodeProto(blob=self, type=self._proto_type_name)
def load(self) -> Tuple[AudioNdArray, int]:
"""
Load the Audio from the [`AudioBytes`][docarray.typing.AudioBytes] into an
[`AudioNdArray`][docarray.typing.AudioNdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioBytes, AudioNdArray, AudioUrl
class MyAudio(BaseDoc):
url: AudioUrl
tensor: Optional[AudioNdArray]
bytes_: Optional[AudioBytes]
frame_rate: Optional[float]
doc = MyAudio(url='https://www.kozco.com/tech/piano2.wav')
doc.bytes_ = doc.url.load_bytes()
doc.tensor, doc.frame_rate = doc.bytes_.load()
        # Note that this is equivalent to:
doc.tensor, doc.frame_rate = doc.url.load()
assert isinstance(doc.tensor, AudioNdArray)
```
---
:return: tuple of an [`AudioNdArray`][docarray.typing.AudioNdArray] representing the
audio bytes content, and an integer representing the frame rate.
"""
pydub = import_library('pydub', raise_error=True) # noqa: F841
from pydub import AudioSegment
segment = AudioSegment.from_file(io.BytesIO(self))
        # Convert the raw samples to a NumPy array
        samples = np.array(segment.get_array_of_samples())
        # Normalise by the sample width so that values lie between -1.0 and +1.0
        samples_norm = samples / 2 ** (segment.sample_width * 8 - 1)
return parse_obj_as(AudioNdArray, samples_norm), segment.frame_rate
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from mmengine.hooks import IterTimerHook
from mmengine.logging import MessageHub
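# Fake clock for patching ``time.time``: returns 0 on the first call and then
# advances by one second per call.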
def time_patch():
if not hasattr(time_patch, 'time'):
time_patch.time = 0
else:
time_patch.time += 1
return time_patch.time
class TestIterTimerHook(TestCase):
def setUp(self) -> None:
self.hook = IterTimerHook()
def test_init(self):
assert self.hook.time_sec_tot == 0
assert self.hook.start_iter == 0
def test_before_run(self):
runner = MagicMock()
runner.iter = 1
self.hook.before_run(runner)
assert self.hook.start_iter == 1
def test_before_epoch(self):
runner = Mock()
self.hook._before_epoch(runner)
assert isinstance(self.hook.t, float)
@patch('time.time', MagicMock(return_value=1))
def test_before_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
self.hook._before_epoch(runner)
for mode in ('train', 'val', 'test'):
self.hook._before_iter(runner, batch_idx=1, mode=mode)
runner.message_hub.update_scalar.assert_called_with(
f'{mode}/data_time', 0)
@patch('time.time', time_patch)
def test_after_iter(self):
runner = MagicMock()
runner.log_buffer = dict()
runner.log_processor.window_size = 10
runner.train_loop.max_iters = 100
runner.iter = 0
runner.test_loop.dataloader = [0] * 20
runner.val_loop.dataloader = [0] * 20
self.hook._before_epoch(runner)
self.hook.before_run(runner)
self.hook._after_iter(runner, batch_idx=1)
runner.message_hub.update_scalar.assert_called()
runner.message_hub.get_log.assert_not_called()
runner.message_hub.update_info.assert_not_called()
runner.message_hub = MessageHub.get_instance('test_iter_timer_hook')
runner.iter = 9
# eta = (100 - 10) / 1
self.hook._after_iter(runner, batch_idx=89)
assert runner.message_hub.get_info('eta') == 90
self.hook._after_iter(runner, batch_idx=9, mode='val')
assert runner.message_hub.get_info('eta') == 10
self.hook._after_iter(runner, batch_idx=19, mode='test')
assert runner.message_hub.get_info('eta') == 0
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import IterTimerHook
class TestIterTimerHook:
def test_before_epoch(self):
hook = IterTimerHook()
runner = Mock()
hook._before_epoch(runner)
assert isinstance(hook.t, float)
def test_before_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._before_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
def test_after_iter(self):
hook = IterTimerHook()
runner = Mock()
runner.log_buffer = dict()
hook._before_epoch(runner)
hook._after_iter(runner, 0)
runner.message_hub.update_scalar.assert_called()
|
from llama_index.llms.huggingface.base import (
HuggingFaceLLM,
)
__all__ = ["HuggingFaceLLM"]
|
from llama_index.llms.huggingface.base import (
HuggingFaceInferenceAPI,
HuggingFaceLLM,
TextGenerationInference,
)
__all__ = ["HuggingFaceLLM", "HuggingFaceInferenceAPI", "TextGenerationInference"]
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import List, Optional, Sequence
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[dict]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
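            For example, ``dict(max_norm=35, norm_type=2)`` is passed as
            keyword arguments to ``torch.nn.utils.clip_grad_norm_``.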
        detect_anomalous_params (bool): This option is only used for
            debugging and will slow down the training speed. It detects
            anomalous parameters that are not included in the
            computational graph with ``loss`` as the root.
            There are two cases:
            - Parameters were not used during the forward pass.
            - Parameters were not used to produce the loss.
            Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[dict], optional): Data from dataloader.
In order to keep this interface consistent with other hooks,
we keep ``data_batch`` here. Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
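        # Recursively walk the autograd graph rooted at ``loss`` and record
        # every parameter (leaf variable) encountered along the way.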
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
from typing import Any, List, Optional, Sequence, Tuple
import torch
from torch.nn.parameter import Parameter
from torch.nn.utils import clip_grad
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class OptimizerHook(Hook):
"""A hook contains custom operations for the optimizer.
Args:
grad_clip (dict, optional): A config dict to control the clip_grad.
Defaults to None.
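            For example, ``dict(max_norm=35, norm_type=2)`` is passed as
            keyword arguments to ``torch.nn.utils.clip_grad_norm_``.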
        detect_anomalous_params (bool): This option is only used for
            debugging and will slow down the training speed. It detects
            anomalous parameters that are not included in the
            computational graph with ``loss`` as the root.
            There are two cases:
            - Parameters were not used during the forward pass.
            - Parameters were not used to produce the loss.
            Defaults to False.
"""
priority = 'HIGH'
def __init__(self,
grad_clip: Optional[dict] = None,
detect_anomalous_params: bool = False) -> None:
self.grad_clip = grad_clip
self.detect_anomalous_params = detect_anomalous_params
def clip_grads(self, params: List[Parameter]) -> Optional[torch.Tensor]:
"""Clip the gradients of parameters.
Args:
params (list[Parameter]): Model's parameters.
Returns:
Optional[torch.Tensor]: Total norm of the parameters if there is
at least one param requiring gradient, else None.
"""
params = list(
filter(lambda p: p.requires_grad and p.grad is not None, params))
if len(params) > 0:
return clip_grad.clip_grad_norm_(params, **self.grad_clip)
return None
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""All operations need to be finished after each training iteration.
        This function will finish the following operations:
- Detect any anomalous parameters which are not included in the
training graph. (optional)
- Compute the gradient of model parameters.
- Clip the gradients of each parameter. (optional)
- Update model parameters with gradients.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks,
we keep ``outputs`` here. Defaults to None.
"""
runner.optimizer.zero_grad()
if self.detect_anomalous_params:
self.detect_anomalous_parameters(runner.outputs['loss'], runner)
runner.outputs['loss'].backward()
if self.grad_clip is not None:
grad_norm = self.clip_grads(runner.model.parameters())
if grad_norm is not None:
# Add grad norm to the logger
runner.log_buffer.update({'grad_norm': float(grad_norm)},
runner.outputs['num_samples'])
runner.optimizer.step()
def detect_anomalous_parameters(self, loss: torch.Tensor, runner) -> None:
"""Detect anomalous parameters that are not included in the graph.
Args:
loss (torch.Tensor): The loss of current iteration.
runner (Runner): The runner of the training process.
"""
logger = runner.logger
parameters_in_graph = set()
visited = set()
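        # Recursively walk the autograd graph rooted at ``loss`` and record
        # every parameter (leaf variable) encountered along the way.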
def traverse(grad_fn):
if grad_fn is None:
return
if grad_fn not in visited:
visited.add(grad_fn)
if hasattr(grad_fn, 'variable'):
parameters_in_graph.add(grad_fn.variable)
parents = grad_fn.next_functions
if parents is not None:
for parent in parents:
grad_fn = parent[0]
traverse(grad_fn)
traverse(loss.grad_fn)
for n, p in runner.model.named_parameters():
if p not in parameters_in_graph and p.requires_grad:
logger.log(
level=logging.ERROR,
msg=f'{n} with shape {p.size()} is not '
f'in the computational graph \n')
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
        Set up the runtime asynchronously.
        This sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
                Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs.keys():
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
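                # Allow websocket messages of up to 1 GiB.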
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
import asyncio
import logging
import os
from jina import __default_host__
from jina.importer import ImportExtensions
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.gateway.websocket.app import get_fastapi_app
__all__ = ['WebSocketGatewayRuntime']
class WebSocketGatewayRuntime(GatewayRuntime):
"""Runtime for Websocket interface."""
async def async_setup(self):
"""
The async method that sets up the runtime.
Sets up the uvicorn server.
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Set up the uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
self.install_signal_handlers()
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: the trailing space after `GET /` matters; without it, every request line containing `GET /` would match and all access logs would be filtered.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
from jina.helper import extend_rest_interface
uvicorn_kwargs = self.args.uvicorn_kwargs or {}
for ssl_file in ['ssl_keyfile', 'ssl_certfile']:
if getattr(self.args, ssl_file):
if ssl_file not in uvicorn_kwargs:
uvicorn_kwargs[ssl_file] = getattr(self.args, ssl_file)
self._set_topology_graph()
self._set_connection_pool()
self._server = UviServer(
config=Config(
app=extend_rest_interface(
get_fastapi_app(
self.args,
topology_graph=self._topology_graph,
connection_pool=self._connection_pool,
logger=self.logger,
metrics_registry=self.metrics_registry,
)
),
host=__default_host__,
port=self.args.port,
ws_max_size=1024 * 1024 * 1024,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**uvicorn_kwargs
)
)
await self._server.setup()
async def async_run_forever(self):
"""Running method of ther server."""
self._connection_pool.start()
await self._server.serve()
async def _wait_for_cancel(self):
"""Do NOT override this method when inheriting from :class:`GatewayPod`"""
# handle terminate signals
while not self.is_cancel.is_set() and not self._server.should_exit:
await asyncio.sleep(0.1)
await self.async_cancel()
async def async_teardown(self):
"""Shutdown the server."""
await self._server.shutdown()
await self._connection_pool.close()
async def async_cancel(self):
"""Stop the server."""
self._server.should_exit = True
|
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500),
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> list[str]:
return [self.input_key]
@property
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, list]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs],
run_manager=run_manager,
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
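# Hypothetical usage sketch (requires an OpenAI API key; the model choice and
# input text are illustrative, and the runnable-based alternative in the class
# docstring is preferred for new code):
from langchain_openai import ChatOpenAI

chain = QAGenerationChain.from_llm(ChatOpenAI())
result = chain.invoke({"text": "Paris is the capital of France."})
qa_pairs = result["questions"]  # parsed JSON question/answer pairs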
|
from __future__ import annotations
import json
from typing import Any, Optional
from langchain_core._api import deprecated
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
from pydantic import Field
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
@deprecated(
since="0.2.7",
alternative=(
"example in API reference with more detail: "
"https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
),
removal="1.0",
)
class QAGenerationChain(Chain):
"""Base class for question-answer generation chains.
This class is deprecated. See below for an alternative implementation.
Advantages of this implementation include:
- Supports async and streaming;
- Surfaces prompt and text splitter for easier customization;
- Use of JsonOutputParser supports JSONPatch operations in streaming mode,
as well as robustness to markdown.
.. code-block:: python
from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
# Note: import PROMPT if using a legacy non-chat model.
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.runnables import (
RunnableLambda,
RunnableParallel,
RunnablePassthrough,
)
from langchain_core.runnables.base import RunnableEach
from langchain_openai import ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
llm = ChatOpenAI()
text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
split_text = RunnableLambda(
lambda x: text_splitter.create_documents([x])
)
chain = RunnableParallel(
text=RunnablePassthrough(),
questions=(
split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
)
)
"""
llm_chain: LLMChain
"""LLM Chain that generates responses from user input and context."""
text_splitter: TextSplitter = Field(
default=RecursiveCharacterTextSplitter(chunk_overlap=500)
)
"""Text splitter that splits the input into chunks."""
input_key: str = "text"
"""Key of the input to the chain."""
output_key: str = "questions"
"""Key of the output of the chain."""
k: Optional[int] = None
"""Number of questions to generate."""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> QAGenerationChain:
"""
Create a QAGenerationChain from a language model.
Args:
llm: a language model
prompt: a prompt template
**kwargs: additional arguments
Returns:
a QAGenerationChain class
"""
_prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
chain = LLMChain(llm=llm, prompt=_prompt)
return cls(llm_chain=chain, **kwargs)
@property
def _chain_type(self) -> str:
raise NotImplementedError
@property
def input_keys(self) -> list[str]:
return [self.input_key]
@property
def output_keys(self) -> list[str]:
return [self.output_key]
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, list]:
docs = self.text_splitter.create_documents([inputs[self.input_key]])
results = self.llm_chain.generate(
[{"text": d.page_content} for d in docs], run_manager=run_manager
)
qa = [json.loads(res[0].text) for res in results.generations]
return {self.output_key: qa}
|
import numpy as np
from docarray import Image
REMOTE_JPG = (
'https://upload.wikimedia.org/wikipedia/commons/8/80/'
'Dag_Sebastian_Ahlander_at_G%C3%B6teborg_Book_Fair_2012b.jpg'
)
def test_image():
image = Image(url=REMOTE_JPG)
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
import numpy as np
from docarray import Image
def test_image():
image = Image(url='http://jina.ai')
image.tensor = image.url.load()
assert isinstance(image.tensor, np.ndarray)
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks, currently
used in YOLOX.
Args:
ratio_range (tuple[int]): Random ratio range. It is multiplied
by 32 to produce the new dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The interval (in iterations) at which the image size changes. Default: 10.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=10,
device='cuda'):
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_iter(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.iter +
1) % self.interval == 0:
# DDP and DP resolve devices inconsistently,
# so we do not take the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
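# Illustrative registration in an mmcv-style config file (values shown are the
# defaults): the hook then re-randomizes the input scale every `interval`
# iterations and broadcasts it from rank 0 so all ranks stay in sync.
custom_hooks = [
    dict(type='SyncRandomSizeHook', ratio_range=(14, 26), interval=10)
]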
|
import random
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks, currently
used in YOLOX.
Args:
ratio_range (tuple[int]): Random ratio range. It is multiplied
by 32 to produce the new dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The interval (in iterations) at which the image size changes. Default: 10.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=10,
device='cuda'):
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_iter(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.iter +
1) % self.interval == 0:
# DDP and DP resolve devices inconsistently,
# so we do not take the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.llmonitor_callback import (
LLMonitorCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback",
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LLMonitorCallbackHandler",
]
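# Minimal sketch of the underlying mechanism (PEP 562): a module-level
# __getattr__ can lazily resolve moved attributes and emit a deprecation
# warning. The helper name below is illustrative and only mirrors what
# create_importer wires into __getattr__, not its internals.
import importlib
import warnings

_MOVED = {
    "LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback",
}

def _lazy_getattr(name: str) -> Any:
    if name in _MOVED:
        warnings.warn(f"{name} moved to {_MOVED[name]}", DeprecationWarning)
        return getattr(importlib.import_module(_MOVED[name]), name)
    raise AttributeError(name)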
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.callbacks.llmonitor_callback import (
LLMonitorCallbackHandler,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"LLMonitorCallbackHandler": "langchain_community.callbacks.llmonitor_callback"
}
_import_attribute = create_importer(__file__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"LLMonitorCallbackHandler",
]
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
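# The linear LR scaling rule behind `auto_scale_lr`, worked as plain
# arithmetic (the 4-GPU setup is illustrative):
base_lr, base_batch_size = 0.01, 64        # lr above; 8 GPUs x 8 samples
actual_batch_size = 4 * 8                  # e.g. running on 4 GPUs instead
scaled_lr = base_lr * actual_batch_size / base_batch_size  # -> 0.005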
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
|
"""Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
class TestLoadPickle:
def test_load_pkl(self) -> None:
"""Test whether prediction is correct."""
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
bst = load_pickle(model_path)
x, y = build_dataset()
if isinstance(bst, xgb.Booster):
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
else:
res = bst.predict(x)
assert len(res) == 10
bst.set_params(n_jobs=1) # triggers a re-configuration
res = bst.predict(x)
assert len(res) == 10
def test_context_is_removed(self) -> None:
"""Under invalid CUDA_VISIBLE_DEVICES, context should reset"""
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cpu"
def test_context_is_preserved(self) -> None:
"""Test the device context is preserved after pickling."""
assert "CUDA_VISIBLE_DEVICES" not in os.environ.keys()
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cuda:0"
def test_wrap_gpu_id(self) -> None:
assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cuda:0"
x, y = build_dataset()
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
assert len(res) == 10
def test_training_on_cpu_only_env(self) -> None:
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
rng = np.random.RandomState(1994)
X = rng.randn(10, 10)
y = rng.randn(10)
with pytest.warns(UserWarning, match="No visible GPU is found"):
xgb.train({"device": "cuda"}, xgb.DMatrix(X, y))
|
"""Loading a pickled model generated by test_pickling.py, only used by
`test_gpu_with_dask.py`"""
import json
import os
import numpy as np
import pytest
from test_gpu_pickling import build_dataset, load_pickle, model_path
import xgboost as xgb
from xgboost import testing as tm
class TestLoadPickle:
def test_load_pkl(self) -> None:
"""Test whether prediction is correct."""
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
bst = load_pickle(model_path)
x, y = build_dataset()
if isinstance(bst, xgb.Booster):
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
else:
res = bst.predict(x)
assert len(res) == 10
bst.set_params(n_jobs=1) # triggers a re-configuration
res = bst.predict(x)
assert len(res) == 10
def test_context_is_removed(self) -> None:
"""Under invalid CUDA_VISIBLE_DEVICES, context should reset"""
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cpu"
def test_context_is_preserved(self) -> None:
"""Test the device context is preserved after pickling."""
assert "CUDA_VISIBLE_DEVICES" not in os.environ.keys()
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cuda:0"
def test_wrap_gpu_id(self) -> None:
assert os.environ["CUDA_VISIBLE_DEVICES"] == "0"
bst = load_pickle(model_path)
config = bst.save_config()
config = json.loads(config)
assert config["learner"]["generic_param"]["device"] == "cuda:0"
x, y = build_dataset()
test_x = xgb.DMatrix(x)
res = bst.predict(test_x)
assert len(res) == 10
def test_training_on_cpu_only_env(self) -> None:
assert os.environ["CUDA_VISIBLE_DEVICES"] == "-1"
rng = np.random.RandomState(1994)
X = rng.randn(10, 10)
y = rng.randn(10)
with pytest.warns(UserWarning, match="No visible GPU is found"):
# Test no thrust exception is thrown
with pytest.raises(xgb.core.XGBoostError, match="have at least one device"):
xgb.train({"tree_method": "gpu_hist"}, xgb.DMatrix(X, y))
|
_base_ = '../mask_rcnn/mask-rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
plugins=[
dict(
cfg=dict(type='ContextBlock', ratio=1. / 4),
stages=(False, True, True, True),
position='after_conv3')
]))
|
from typing import cast
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
from backend.util.type import typed_cast
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"Nodes": {
"include": typed_cast(
prisma.types.AgentNodeIncludeFromAgentNodeRecursive1,
prisma.types.AgentNodeIncludeFromAgentNode,
AGENT_NODE_INCLUDE,
)
}
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
"include": {
"Input": True,
"Output": True,
"Node": True,
"GraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
# Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"NodeExecutions": {
**cast(
prisma.types.FindManyAgentNodeExecutionArgsFromAgentGraphExecution,
GRAPH_EXECUTION_INCLUDE_WITH_NODES["NodeExecutions"],
),
"where": {
"Node": typed_cast(
prisma.types.AgentNodeRelationFilter,
prisma.types.AgentNodeWhereInput,
{
"AgentBlock": {"id": {"in": IO_BLOCK_IDs}},
},
),
"NOT": [{"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE}],
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {
"include": typed_cast(
prisma.types.AgentNodeIncludeFromAgentNodeRecursive1,
prisma.types.AgentNodeInclude,
AGENT_NODE_INCLUDE,
)
}
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"AgentGraph": {
"include": {
**AGENT_GRAPH_INCLUDE,
"Executions": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
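# Why the casts: Prisma's include types are TypedDicts, and a plain dict
# literal reused across differently-named include types needs narrowing for
# the type checker. Minimal stdlib sketch (the types here are illustrative):
from typing import TypedDict

class _NodeInclude(TypedDict, total=False):
    Input: bool
    Output: bool

_raw: dict = {"Input": True, "Output": True}
_node_include = cast(_NodeInclude, _raw)  # no runtime effect; informs the checker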
|
import prisma.enums
import prisma.types
from backend.blocks.io import IO_BLOCK_IDs
AGENT_NODE_INCLUDE: prisma.types.AgentNodeInclude = {
"Input": True,
"Output": True,
"Webhook": True,
"AgentBlock": True,
}
AGENT_GRAPH_INCLUDE: prisma.types.AgentGraphInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
EXECUTION_RESULT_INCLUDE: prisma.types.AgentNodeExecutionInclude = {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
}
MAX_NODE_EXECUTIONS_FETCH = 1000
GRAPH_EXECUTION_INCLUDE_WITH_NODES: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
"include": {
"Input": True,
"Output": True,
"AgentNode": True,
"AgentGraphExecution": True,
},
"order_by": [
{"queuedTime": "desc"},
# Fallback: incomplete executions have no queuedTime.
{"addedTime": "desc"},
],
"take": MAX_NODE_EXECUTIONS_FETCH, # Avoid loading excessive node executions.
}
}
GRAPH_EXECUTION_INCLUDE: prisma.types.AgentGraphExecutionInclude = {
"AgentNodeExecutions": {
**GRAPH_EXECUTION_INCLUDE_WITH_NODES["AgentNodeExecutions"], # type: ignore
"where": {
"AgentNode": {
"AgentBlock": {"id": {"in": IO_BLOCK_IDs}}, # type: ignore
},
"NOT": {
"executionStatus": prisma.enums.AgentExecutionStatus.INCOMPLETE,
},
},
}
}
INTEGRATION_WEBHOOK_INCLUDE: prisma.types.IntegrationWebhookInclude = {
"AgentNodes": {"include": AGENT_NODE_INCLUDE} # type: ignore
}
def library_agent_include(user_id: str) -> prisma.types.LibraryAgentInclude:
return {
"Agent": {
"include": {
**AGENT_GRAPH_INCLUDE,
"AgentGraphExecution": {"where": {"userId": user_id}},
}
},
"Creator": True,
}
|
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GN) is a natural choice as a corruption process
for real-valued inputs.
As it is a regularization layer, it is only active at training time.
Args:
stddev: Float, standard deviation of the noise distribution.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
"""
def __init__(self, stddev, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= stddev <= 1:
raise ValueError(
f"Invalid value received for argument "
"`stddev`. Expected a float value between 0 and 1. "
f"Received: stddev={stddev}"
)
self.stddev = stddev
self.seed = seed
if stddev > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self._build_at_init()
def call(self, inputs, training=False):
if training and self.stddev > 0:
return inputs + backend.random.normal(
shape=ops.shape(inputs),
mean=0.0,
stddev=self.stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"stddev": self.stddev,
"seed": self.seed,
}
return {**base_config, **config}
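# Quick usage sketch (shapes and values are illustrative): noise is added only
# when training=True; inference passes the input through unchanged.
import numpy as np
from keras import layers

noise = layers.GaussianNoise(stddev=0.1, seed=42)
x = np.ones((2, 4), dtype="float32")
y_train = noise(x, training=True)   # x plus N(0, 0.1) noise
y_infer = noise(x, training=False)  # identical to x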
|
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GN) is a natural choice as a corruption process
for real-valued inputs.
As it is a regularization layer, it is only active at training time.
Args:
stddev: Float, standard deviation of the noise distribution.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
"""
def __init__(self, stddev, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= stddev <= 1:
raise ValueError(
f"Invalid value received for argument "
"`stddev`. Expected a float value between 0 and 1. "
f"Received: stddev={stddev}"
)
self.stddev = stddev
self.seed = seed
if stddev > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self.built = True
def call(self, inputs, training=False):
if training and self.stddev > 0:
return inputs + backend.random.normal(
shape=ops.shape(inputs),
mean=0.0,
stddev=self.stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"stddev": self.stddev,
"seed": self.seed,
}
return {**base_config, **config}
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Example of using a different file client backend
# Method 1: simply set the data root and let the file I/O module
# infer the backend from the path prefix (LMDB and Memcached are not supported yet)
# data_root = 's3://openmmlab/datasets/detection/coco/'
# Method 2: Use `backend_args`, `file_client_args` in versions before 3.0.0rc6
# backend_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
backend_args = None
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', backend_args=backend_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline,
backend_args=backend_args)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline,
backend_args=backend_args))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox',
backend_args=backend_args)
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] yields higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether LR auto-scaling is enabled by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
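# The "3x" arithmetic behind RepeatDataset above (illustrative): each training
# epoch traverses the repeated dataset, so 12 epochs cover 36 passes over the
# raw data, matching a conventional 36-epoch schedule.
times, max_epochs = 3, 12
effective_passes = times * max_epochs  # -> 36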
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
file_client_args = dict(backend='disk')
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric='bbox')
test_evaluator = val_evaluator
# training schedule for 3x with `RepeatDataset`
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# learning rate
# Experiments show that using milestones=[9, 11] yields higher performance
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[9, 11],
gamma=0.1)
]
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001))
# Default setting for scaling LR automatically
# - `enable`: whether LR auto-scaling is enabled by default.
# - `base_batch_size` = (8 GPUs) x (2 samples per GPU).
auto_scale_lr = dict(enable=False, base_batch_size=16)
|
"""This module contains the core type definitions and protocols used throughout Dynamo.
The types defined here fall into several categories:
- Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code
- Frame and cache types (FrameState, CacheEntry): Used for managing interpreter frame state and caching
- Callback protocols (DynamoCallbackFn): Define the interface for frame evaluation callbacks
- Hook protocols (DynamoGuardHook, ProfilerStartHook, ProfilerEndHook, BytecodeHook): Define various hook points for
instrumentation and customization
These types provide the foundational interfaces that enable Dynamo's dynamic compilation and optimization system,
ensuring type safety and clear contracts between different components of the system.
"""
import dataclasses
import types
from typing import Any, Callable, NamedTuple, Optional, Protocol, Union
# CacheEntry has a `guard_manager` field for the guard, and a `code` field for the code object.
from torch._C._dynamo.eval_frame import (
_CacheEntry as CacheEntry,
_ExtraState as ExtraState,
_FrameAction as FrameAction,
_FrameExecStrategy as FrameExecStrategy,
_PyInterpreterFrame as DynamoFrameType,
)
from torch._guards import CompileId, Guard
# We use a dict to store additional data per frame.
FrameState = dict[Any, Any]
class GuardFail(NamedTuple):
# A string repr of the piece of failed guard code we eval-ed
reason: str
# A code object where we failed a guard
orig_code: types.CodeType
@dataclasses.dataclass(frozen=True)
class GuardFilterEntry:
name: str
has_value: bool
value: object
guard_type: str
derived_guard_types: tuple[str, ...]
is_global: bool
orig_guard: Guard
class GuardFn(Protocol):
closure_vars: dict[str, object]
args: list[str]
code_parts: list[str]
verbose_code_parts: list[str]
global_scope: dict[str, object]
guard_fail_fn: Optional[Callable[[GuardFail], None]]
cache_entry: Optional[CacheEntry]
extra_state: Optional[ExtraState]
# maps locals of user function to bool
def __call__(self, f_locals: dict[str, object]) -> bool: ...
@dataclasses.dataclass
class GuardedCode:
code: types.CodeType
guard_manager: GuardFn
compile_id: CompileId
trace_annotation: str = "Unknown"
@dataclasses.dataclass
class ConvertFrameReturn:
# default return is no compiled code (i.e. `return None`):
# strategy is to skip non-recursively, for all future intercepted frames too
# eval frame execution strategy for this frame
frame_exec_strategy: FrameExecStrategy = dataclasses.field(
default_factory=lambda: FrameExecStrategy(FrameAction.SKIP, FrameAction.DEFAULT)
)
# also apply frame_exec strategy to future frames with same code
apply_to_code: bool = True
guarded_code: Optional[GuardedCode] = None
def wrap_guarded_code(guarded_code: GuardedCode) -> ConvertFrameReturn:
return ConvertFrameReturn(
frame_exec_strategy=FrameExecStrategy(FrameAction.DEFAULT, FrameAction.DEFAULT),
guarded_code=guarded_code,
)
class DynamoCallbackFn(Protocol):
def __call__(
self,
frame: DynamoFrameType,
cache_entry: Optional[CacheEntry],
frame_state: FrameState,
) -> ConvertFrameReturn: ...
DynamoCallback = Union[DynamoCallbackFn, None, bool]
class DynamoGuardHook(Protocol):
def __call__(
self,
guard_manager: GuardFn,
code: types.CodeType,
f_locals: dict[str, object],
index: int,
last: bool,
) -> None: ...
class DynamoGuardCompleteHook(Protocol):
def __call__(
self,
cache_hit: bool,
) -> bool: ...
class ProfilerStartHook(Protocol):
def __call__(
self,
name: str,
# TODO(whc) how do I annotate a _RecordFunction here?
) -> Any: ...
class ProfilerEndHook(Protocol):
def __call__(self, record: Any) -> None: ...
class BytecodeHook(Protocol):
def __call__(
self, code: types.CodeType, new_code: types.CodeType
) -> Optional[types.CodeType]: ...
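# Minimal sketch of a callable satisfying the BytecodeHook protocol (the body
# is illustrative): returning None leaves the transformed code as-is, while
# returning a code object substitutes it.
def log_bytecode_hook(
    code: types.CodeType, new_code: types.CodeType
) -> Optional[types.CodeType]:
    print(f"dynamo transformed {code.co_name}")
    return None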
|
"""This module contains the core type definitions and protocols used throughout Dynamo.
The types defined here fall into several categories:
- Guard related types (GuardFn, GuardFail, GuardedCode): Used for tracking and managing guards that protect compiled code
- Frame and cache types (FrameState, CacheEntry): Used for managing interpreter frame state and caching
- Callback protocols (DynamoCallbackFn): Define the interface for frame evaluation callbacks
- Hook protocols (DynamoGuardHook, ProfilerStartHook, ProfilerEndHook, BytecodeHook): Define various hook points for
instrumentation and customization
These types provide the foundational interfaces that enable Dynamo's dynamic compilation and optimization system,
ensuring type safety and clear contracts between different components of the system.
"""
import dataclasses
import types
from typing import Any, Callable, NamedTuple, Optional, Protocol, Union
# CacheEntry has a `guard_manager` field for the guard, and a `code` field for the code object.
from torch._C._dynamo.eval_frame import (
_CacheEntry as CacheEntry,
_ExtraState as ExtraState,
_FrameAction as FrameAction,
_FrameExecStrategy as FrameExecStrategy,
_PyInterpreterFrame as DynamoFrameType,
)
from torch._guards import CompileId, Guard
# We use a dict to store additional data per frame.
FrameState = dict[Any, Any]
class GuardFail(NamedTuple):
# A string repr of the piece of failed guard code we eval-ed
reason: str
# A code object where we failed a guard
orig_code: types.CodeType
@dataclasses.dataclass(frozen=True)
class GuardFilterEntry:
name: str
has_value: bool
value: object
guard_type: str
derived_guard_types: tuple[str, ...]
is_global: bool
orig_guard: Guard
class GuardFn(Protocol):
closure_vars: dict[str, object]
args: list[str]
code_parts: list[str]
verbose_code_parts: list[str]
global_scope: dict[str, object]
guard_fail_fn: Optional[Callable[[GuardFail], None]]
cache_entry: Optional[CacheEntry]
extra_state: Optional[ExtraState]
# maps locals of user function to bool
def __call__(self, f_locals: dict[str, object]) -> bool: ...
@dataclasses.dataclass
class GuardedCode:
code: types.CodeType
guard_manager: GuardFn
compile_id: CompileId
trace_annotation: str = "Unknown"
@dataclasses.dataclass
class ConvertFrameReturn:
# default return is no compiled code (i.e. `return None`):
# strategy is to skip non-recursively, for all future intercepted frames too
# eval frame execution strategy for this frame
frame_exec_strategy: FrameExecStrategy = dataclasses.field(
default_factory=lambda: FrameExecStrategy(FrameAction.SKIP, FrameAction.DEFAULT)
)
# also apply frame_exec strategy to future frames with same code
apply_to_code: bool = True
guarded_code: Optional[GuardedCode] = None
def wrap_guarded_code(guarded_code: GuardedCode) -> ConvertFrameReturn:
return ConvertFrameReturn(
frame_exec_strategy=FrameExecStrategy(FrameAction.DEFAULT, FrameAction.DEFAULT),
guarded_code=guarded_code,
)
class DynamoCallbackFn(Protocol):
def __call__(
self,
frame: DynamoFrameType,
cache_entry: Optional[CacheEntry],
frame_state: FrameState,
) -> ConvertFrameReturn: ...
DynamoCallback = Union[DynamoCallbackFn, None, bool]
class DynamoGuardHook(Protocol):
def __call__(
self,
guard_manager: GuardFn,
code: types.CodeType,
f_locals: dict[str, object],
index: int,
last: bool,
) -> None: ...
class ProfilerStartHook(Protocol):
def __call__(
self,
name: str,
# TODO(whc) how do I annotate a _RecordFunction here?
) -> Any: ...
class ProfilerEndHook(Protocol):
def __call__(self, record: Any) -> None: ...
class BytecodeHook(Protocol):
def __call__(
self, code: types.CodeType, new_code: types.CodeType
) -> Optional[types.CodeType]: ...
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.9.1.dev0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
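# How the guards above compare versions (standalone illustration):
# packaging.version.parse yields objects with sane ordering and a numeric
# `major` field.
from packaging import version

assert version.parse("3.8.10") >= version.parse("3.7")
assert version.parse("6.0.1").major >= 6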
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.9.0"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 6:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=6.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './paa_r50_fpn_1x_coco.py'
max_epochs = 36
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[28, 34],
gamma=0.1)
]
# training schedule for 3x
train_cfg = dict(max_epochs=max_epochs)
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomResize', scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
preprocess_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
train_dataloader = dict(batch_size=8, num_workers=4)
optim_wrapper = dict(optimizer=dict(lr=0.01))
# TODO: MMEngine does not support fp16 yet.
# fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
teacher_ckpt = 'http://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth' # noqa
model = dict(
type='LAD',
# student
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# teacher
teacher_ckpt=teacher_ckpt,
teacher_backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
teacher_neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
teacher_bbox_head=dict(
type='LADHead',
reg_decoded_bbox=True,
score_voting=True,
topk=9,
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
score_voting=True,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
data = dict(samples_per_gpu=8, workers_per_gpu=4)
optimizer = dict(lr=0.01)
fp16 = dict(loss_scale=512.)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|