input (string, lengths 33–5k) | output (string, lengths 32–5k) |
---|---|
from ._dsp import (
adsr_envelope,
extend_pitch,
filter_waveform,
frequency_impulse_response,
oscillator_bank,
sinc_impulse_response,
)
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"deemphasis",
"extend_pitch",
"fftconvolve",
"filter_waveform",
"frequency_impulse_response",
"oscillator_bank",
"preemphasis",
"sinc_impulse_response",
"speed",
]
|
from ._dsp import adsr_envelope, extend_pitch, frequency_impulse_response, oscillator_bank, sinc_impulse_response
from .functional import add_noise, barkscale_fbanks, convolve, deemphasis, fftconvolve, preemphasis, speed
__all__ = [
"add_noise",
"adsr_envelope",
"barkscale_fbanks",
"convolve",
"deemphasis",
"extend_pitch",
"fftconvolve",
"frequency_impulse_response",
"oscillator_bank",
"preemphasis",
"sinc_impulse_response",
"speed",
]
|
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.execution import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
import logging
from autogpt_libs.utils.cache import thread_cached
from backend.data.block import (
Block,
BlockCategory,
BlockInput,
BlockOutput,
BlockSchema,
BlockType,
get_block,
)
from backend.data.execution import ExecutionStatus
from backend.data.model import SchemaField
logger = logging.getLogger(__name__)
@thread_cached
def get_executor_manager_client():
from backend.executor import ExecutionManager
from backend.util.service import get_service_client
return get_service_client(ExecutionManager)
@thread_cached
def get_event_bus():
from backend.data.queue import RedisExecutionEventBus
return RedisExecutionEventBus()
class AgentExecutorBlock(Block):
class Input(BlockSchema):
user_id: str = SchemaField(description="User ID")
graph_id: str = SchemaField(description="Graph ID")
graph_version: int = SchemaField(description="Graph Version")
data: BlockInput = SchemaField(description="Input data for the graph")
input_schema: dict = SchemaField(description="Input schema for the graph")
output_schema: dict = SchemaField(description="Output schema for the graph")
class Output(BlockSchema):
pass
def __init__(self):
super().__init__(
id="e189baac-8c20-45a1-94a7-55177ea42565",
description="Executes an existing agent inside your agent",
input_schema=AgentExecutorBlock.Input,
output_schema=AgentExecutorBlock.Output,
block_type=BlockType.AGENT,
categories={BlockCategory.AGENT},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
executor_manager = get_executor_manager_client()
event_bus = get_event_bus()
graph_exec = executor_manager.add_execution(
graph_id=input_data.graph_id,
graph_version=input_data.graph_version,
user_id=input_data.user_id,
data=input_data.data,
)
log_id = f"Graph #{input_data.graph_id}-V{input_data.graph_version}, exec-id: {graph_exec.graph_exec_id}"
logger.info(f"Starting execution of {log_id}")
for event in event_bus.listen(
graph_id=graph_exec.graph_id, graph_exec_id=graph_exec.graph_exec_id
):
logger.info(
f"Execution {log_id} produced input {event.input_data} output {event.output_data}"
)
if not event.node_id:
if event.status in [ExecutionStatus.COMPLETED, ExecutionStatus.FAILED]:
logger.info(f"Execution {log_id} ended with status {event.status}")
break
else:
continue
if not event.block_id:
logger.warning(f"{log_id} received event without block_id {event}")
continue
block = get_block(event.block_id)
if not block or block.block_type != BlockType.OUTPUT:
continue
output_name = event.input_data.get("name")
if not output_name:
logger.warning(f"{log_id} produced an output with no name {event}")
continue
for output_data in event.output_data.get("output", []):
logger.info(f"Execution {log_id} produced {output_name}: {output_data}")
yield output_name, output_data
|
from __future__ import annotations
import sys
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .Router import Asym, Router
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
sys.modules["sentence_transformers.models.Asym"] = sys.modules["sentence_transformers.models.Router"]
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
"Router",
]
|
from __future__ import annotations
from .Asym import Asym
from .BoW import BoW
from .CLIPModel import CLIPModel
from .CNN import CNN
from .Dense import Dense
from .Dropout import Dropout
from .InputModule import InputModule
from .LayerNorm import LayerNorm
from .LSTM import LSTM
from .Module import Module
from .Normalize import Normalize
from .Pooling import Pooling
from .StaticEmbedding import StaticEmbedding
from .Transformer import Transformer
from .WeightedLayerPooling import WeightedLayerPooling
from .WordEmbeddings import WordEmbeddings
from .WordWeights import WordWeights
__all__ = [
"Transformer",
"StaticEmbedding",
"Asym",
"BoW",
"CNN",
"Dense",
"Dropout",
"LayerNorm",
"LSTM",
"Normalize",
"Pooling",
"WeightedLayerPooling",
"WordEmbeddings",
"WordWeights",
"CLIPModel",
"Module",
"InputModule",
]
|
import csv
from contextlib import nullcontext
from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class CsvIOMixin:
"""CSV IO helper.
Can be applied to DA & DAM.
"""
def save_embeddings_csv(
self, file: Union[str, TextIO], encoding: str = 'utf-8', **kwargs
) -> None:
"""Save embeddings to a CSV file
This function utilizes :meth:`numpy.savetxt` internal.
:param file: File or filename to which the data is saved.
:param encoding: encoding used to save the data into a file. By default, ``utf-8`` is used.
:param kwargs: extra kwargs will be passed to :meth:`numpy.savetxt`.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx:
np.savetxt(file_ctx, self.embeddings, **kwargs)
def save_csv(
self,
file: Union[str, TextIO],
flatten_tags: bool = True,
exclude_fields: Optional[Sequence[str]] = None,
dialect: Union[str, 'csv.Dialect'] = 'excel',
with_header: bool = True,
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a CSV file.
:param file: File or filename to which the data is saved.
:param flatten_tags: if set, then all fields in ``Document.tags`` will be flattened into ``tag__fieldname`` and
stored as separate columns. It is useful when ``tags`` contains a lot of information.
:param exclude_fields: if set, those fields won't show up in the output CSV
:param dialect: define a set of parameters specific to a particular CSV dialect. It can be a string naming a
predefined dialect on your system, or a :class:`csv.Dialect` class that groups specific formatting
parameters together.
:param encoding: encoding used to save the data into a CSV file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
if flatten_tags and self[0].tags:
keys = list(self[0].non_empty_fields) + list(
f'tag__{k}' for k in self[0].tags
)
keys.remove('tags')
else:
flatten_tags = False
keys = list(self[0].non_empty_fields)
if exclude_fields:
for k in exclude_fields:
if k in keys:
keys.remove(k)
writer = csv.DictWriter(fp, fieldnames=keys, dialect=dialect)
if with_header:
writer.writeheader()
for d in self:
pd = d.to_dict(
protocol='jsonschema',
exclude=set(exclude_fields) if exclude_fields else None,
exclude_none=True,
)
if flatten_tags:
t = pd.pop('tags')
pd.update({f'tag__{k}': v for k, v in t.items()})
writer.writerow(pd)
@classmethod
def load_csv(
cls: Type['T'],
file: Union[str, TextIO],
field_resolver: Optional[Dict[str, str]] = None,
encoding: str = 'utf-8',
) -> 'T':
"""Load array elements from a binary file.
:param file: File or filename to which the data is saved.
:param field_resolver: a map from the field names defined in the JSON/dict to the field
names defined in Document.
:param encoding: encoding used to read a CSV file. By default, ``utf-8`` is used.
:return: a DocumentArray object
"""
from docarray.document.generators import from_csv
return cls(from_csv(file, field_resolver=field_resolver, encoding=encoding))
|
import csv
from contextlib import nullcontext
from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence
import numpy as np
if TYPE_CHECKING:
from ....typing import T
class CsvIOMixin:
"""CSV IO helper.
Can be applied to DA & DAM.
"""
def save_embeddings_csv(
self, file: Union[str, TextIO], encoding: str = 'utf-8', **kwargs
) -> None:
"""Save embeddings to a CSV file
This function utilizes :meth:`numpy.savetxt` internal.
:param file: File or filename to which the data is saved.
:param encoding: encoding used to save the data into a file. By default, ``utf-8`` is used.
:param kwargs: extra kwargs will be passed to :meth:`numpy.savetxt`.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx:
np.savetxt(file_ctx, self.embeddings, **kwargs)
def save_csv(
self,
file: Union[str, TextIO],
flatten_tags: bool = True,
exclude_fields: Optional[Sequence[str]] = None,
dialect: Union[str, 'csv.Dialect'] = 'excel',
with_header: bool = True,
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a CSV file.
:param file: File or filename to which the data is saved.
:param flatten_tags: if set, then all fields in ``Document.tags`` will be flattened into ``tag__fieldname`` and
stored as separate columns. It is useful when ``tags`` contains a lot of information.
:param exclude_fields: if set, those fields won't show up in the output CSV
:param dialect: define a set of parameters specific to a particular CSV dialect. It can be a string naming a
predefined dialect on your system, or a :class:`csv.Dialect` class that groups specific formatting
parameters together.
:param encoding: encoding used to save the data into a CSV file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
if flatten_tags and self[0].tags:
keys = list(self[0].non_empty_fields) + list(
f'tag__{k}' for k in self[0].tags
)
keys.remove('tags')
else:
flatten_tags = False
keys = list(self[0].non_empty_fields)
if exclude_fields:
for k in exclude_fields:
if k in keys:
keys.remove(k)
writer = csv.DictWriter(fp, fieldnames=keys, dialect=dialect)
if with_header:
writer.writeheader()
for d in self:
pd = d.to_dict(
protocol='jsonschema',
exclude=set(exclude_fields) if exclude_fields else None,
exclude_none=True,
)
if flatten_tags:
t = pd.pop('tags')
pd.update({f'tag__{k}': v for k, v in t.items()})
writer.writerow(pd)
@classmethod
def load_csv(
cls: Type['T'],
file: Union[str, TextIO],
field_resolver: Optional[Dict[str, str]] = None,
encoding: str = 'utf-8',
) -> 'T':
"""Load array elements from a binary file.
:param file: File or filename to which the data is saved.
:param field_resolver: a map from the field names defined in the JSON/dict to the field
names defined in Document.
:param encoding: encoding used to read a CSV file. By default, ``utf-8`` is used.
:return: a DocumentArray object
"""
from ....document.generators import from_csv
return cls(from_csv(file, field_resolver=field_resolver, encoding=encoding))
|
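A brief usage sketch of the mixin above, as exposed on docarray v1's `DocumentArray` (assumes docarray<2 is installed; the tag names and file path are illustrative):

from docarray import Document, DocumentArray  # docarray v1 API

da = DocumentArray(
    [
        Document(text='hello', tags={'lang': 'en'}),
        Document(text='hola', tags={'lang': 'es'}),
    ]
)
# Tags are flattened into `tag__lang` columns; a header row is written by default.
da.save_csv('docs.csv', flatten_tags=True)
# Round-trip the file back into a DocumentArray.
da2 = DocumentArray.load_csv('docs.csv')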
from langchain.output_parsers.regex import RegexParser
# NOTE: Nearly the same constants appear in ./test_combining_parser.py
DEF_EXPECTED_RESULT = {
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_regex_parser_parse() -> None:
"""Test regex parser parse."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert DEF_EXPECTED_RESULT == parser.parse(DEF_README)
def test_regex_parser_output_type() -> None:
"""Test regex parser output type is Dict[str, str]."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert parser.OutputType == dict[str, str]
|
from typing import Dict
from langchain.output_parsers.regex import RegexParser
# NOTE: Nearly the same constants appear in ./test_combining_parser.py
DEF_EXPECTED_RESULT = {
"confidence": "A",
"explanation": "Paris is the capital of France according to Wikipedia.",
}
DEF_README = """```json
{
"answer": "Paris",
"source": "https://en.wikipedia.org/wiki/France"
}
```
//Confidence: A, Explanation: Paris is the capital of France according to Wikipedia."""
def test_regex_parser_parse() -> None:
"""Test regex parser parse."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert DEF_EXPECTED_RESULT == parser.parse(DEF_README)
def test_regex_parser_output_type() -> None:
"""Test regex parser output type is Dict[str, str]."""
parser = RegexParser(
regex=r"Confidence: (A|B|C), Explanation: (.*)",
output_keys=["confidence", "explanation"],
default_output_key="noConfidence",
)
assert parser.OutputType is Dict[str, str]
|
import json
import re
import sys
from functools import cache
from pathlib import Path
from typing import Dict, Iterable, List, Union
CURR_DIR = Path(__file__).parent.absolute()
CLI_TEMPLATE_DIR = (
CURR_DIR.parent.parent / "libs/cli/langchain_cli/integration_template/docs"
)
INFO_BY_DIR: Dict[str, Dict[str, Union[int, str]]] = {
"chat": {
"issue_number": 22296,
},
"document_loaders": {
"issue_number": 22866,
},
"stores": {"issue_number": 24888},
"llms": {
"issue_number": 24803,
},
"text_embedding": {"issue_number": 14856},
"toolkits": {"issue_number": 24820},
"tools": {"issue_number": "TODO"},
"vectorstores": {"issue_number": 24800},
"retrievers": {"issue_number": 24908},
}
@cache
def _get_headers(doc_dir: str) -> Iterable[str]:
"""Gets all markdown headers ## and below from the integration template.
Ignores headers that contain "TODO"."""
ipynb_name = f"{doc_dir}.ipynb"
if not (CLI_TEMPLATE_DIR / ipynb_name).exists():
raise FileNotFoundError(f"Could not find {ipynb_name} in {CLI_TEMPLATE_DIR}")
with open(CLI_TEMPLATE_DIR / ipynb_name, "r") as f:
nb = json.load(f)
headers: List[str] = []
for cell in nb["cells"]:
if cell["cell_type"] == "markdown":
for line in cell["source"]:
if not line.startswith("## ") or "TODO" in line:
continue
header = line.strip()
headers.append(header)
return headers
def check_header_order(path: Path) -> None:
if path.name.startswith("index."):
# skip index pages
return
doc_dir = path.parent.name
if doc_dir not in INFO_BY_DIR:
# Skip if not a directory we care about
return
if "toolkit" in path.name:
headers = _get_headers("toolkits")
else:
headers = _get_headers(doc_dir)
issue_number = INFO_BY_DIR[doc_dir].get("issue_number", "nonexistent")
print(f"Checking {doc_dir} page {path}")
with open(path, "r") as f:
doc = f.read()
notfound = []
for header in headers:
index = doc.find(header)
if index == -1:
notfound.append(header)
doc = doc[index + len(header) :]
if notfound:
notfound_headers = "\n- ".join(notfound)
raise ValueError(
f"Document {path} is missing headers:"
"\n- "
f"{notfound_headers}"
"\n\n"
"Please see https://github.com/langchain-ai/langchain/issues/"
f"{issue_number} for instructions on how to correctly format a "
f"{doc_dir} integration page."
)
def main(*new_doc_paths: Union[str, Path]) -> None:
for path in new_doc_paths:
path = Path(path).resolve().absolute()
if CURR_DIR.parent / "docs" / "integrations" in path.parents:
check_header_order(path)
else:
continue
if __name__ == "__main__":
main(*sys.argv[1:])
|
import json
import re
import sys
from functools import cache
from pathlib import Path
from typing import Dict, Iterable, List, Union
CURR_DIR = Path(__file__).parent.absolute()
CLI_TEMPLATE_DIR = (
CURR_DIR.parent.parent / "libs/cli/langchain_cli/integration_template/docs"
)
INFO_BY_DIR: Dict[str, Dict[str, Union[int, str]]] = {
"chat": {
"issue_number": 22296,
},
"document_loaders": {
"issue_number": 22866,
},
"stores": {"issue_number": 24888},
"llms": {
"issue_number": 24803,
},
"text_embedding": {"issue_number": 14856},
"toolkits": {"issue_number": 24820},
"tools": {"issue_number": "TODO"},
"vectorstores": {"issue_number": 24800},
"retrievers": {"issue_number": 24908},
}
@cache
def _get_headers(doc_dir: str) -> Iterable[str]:
"""Gets all markdown headers ## and below from the integration template.
Ignores headers that contain "TODO"."""
ipynb_name = f"{doc_dir}.ipynb"
if not (CLI_TEMPLATE_DIR / ipynb_name).exists():
raise FileNotFoundError(f"Could not find {ipynb_name} in {CLI_TEMPLATE_DIR}")
with open(CLI_TEMPLATE_DIR / ipynb_name, "r") as f:
nb = json.load(f)
headers: List[str] = []
for cell in nb["cells"]:
if cell["cell_type"] == "markdown":
for line in cell["source"]:
if not line.startswith("## ") or "TODO" in line:
continue
header = line.strip()
headers.append(header)
return headers
def check_header_order(path: Path) -> None:
if path.name.startswith("index."):
# skip index pages
return
doc_dir = path.parent.name
if doc_dir not in INFO_BY_DIR:
# Skip if not a directory we care about
return
headers = _get_headers(doc_dir)
issue_number = INFO_BY_DIR[doc_dir].get("issue_number", "nonexistent")
print(f"Checking {doc_dir} page {path}")
with open(path, "r") as f:
doc = f.read()
notfound = []
for header in headers:
index = doc.find(header)
if index == -1:
notfound.append(header)
doc = doc[index + len(header) :]
if notfound:
notfound_headers = "\n- ".join(notfound)
raise ValueError(
f"Document {path} is missing headers:"
"\n- "
f"{notfound_headers}"
"\n\n"
"Please see https://github.com/langchain-ai/langchain/issues/"
f"{issue_number} for instructions on how to correctly format a "
f"{doc_dir} integration page."
)
def main(*new_doc_paths: Union[str, Path]) -> None:
for path in new_doc_paths:
path = Path(path).resolve().absolute()
if CURR_DIR.parent / "docs" / "integrations" in path.parents:
check_header_order(path)
else:
continue
if __name__ == "__main__":
main(*sys.argv[1:])
|
"""Tool for the Google Books API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_books import GoogleBooksAPIWrapper
class GoogleBooksQueryInput(BaseModel):
"""Input for the GoogleBooksQuery tool."""
query: str = Field(description="query to look up on google books")
class GoogleBooksQueryRun(BaseTool):
"""Tool that searches the Google Books API."""
name: str = "GoogleBooks"
description: str = (
"A wrapper around Google Books. "
"Useful for when you need to answer general inquiries about "
"books of certain topics and generate recommendation based "
"off of key words"
"Input should be a query string"
)
api_wrapper: GoogleBooksAPIWrapper
args_schema: Type[BaseModel] = GoogleBooksQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Google Books tool."""
return self.api_wrapper.run(query)
|
"""Tool for the Google Books API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_books import GoogleBooksAPIWrapper
class GoogleBooksQueryInput(BaseModel):
"""Input for the GoogleBooksQuery tool."""
query: str = Field(description="query to look up on google books")
class GoogleBooksQueryRun(BaseTool): # type: ignore[override]
"""Tool that searches the Google Books API."""
name: str = "GoogleBooks"
description: str = (
"A wrapper around Google Books. "
"Useful for when you need to answer general inquiries about "
"books of certain topics and generate recommendation based "
"off of key words"
"Input should be a query string"
)
api_wrapper: GoogleBooksAPIWrapper
args_schema: Type[BaseModel] = GoogleBooksQueryInput
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Google Books tool."""
return self.api_wrapper.run(query)
|
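A hedged sketch of wiring the tool above together (assumes the two classes are importable as defined in the file; the key handling shown is an assumption, since some versions read GOOGLE_BOOKS_API_KEY from the environment instead):

import os

api_wrapper = GoogleBooksAPIWrapper(google_books_api_key=os.environ["GOOGLE_BOOKS_API_KEY"])
tool = GoogleBooksQueryRun(api_wrapper=api_wrapper)
print(tool.run("books about machine learning"))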
import time
from typing import Tuple
from redis import Redis
from .config import RATE_LIMIT_SETTINGS
class RateLimiter:
def __init__(
self,
redis_host: str = RATE_LIMIT_SETTINGS.redis_host,
redis_port: str = RATE_LIMIT_SETTINGS.redis_port,
redis_password: str = RATE_LIMIT_SETTINGS.redis_password,
requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute,
):
self.redis = Redis(
host=redis_host,
port=int(redis_port),
password=redis_password,
decode_responses=True,
)
self.window = 60
self.max_requests = requests_per_minute
async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]:
"""
Check if request is within rate limits.
Args:
api_key_id: The API key identifier to check
Returns:
Tuple of (is_allowed, remaining_requests, reset_time)
"""
now = time.time()
window_start = now - self.window
key = f"ratelimit:{api_key_id}:1min"
pipe = self.redis.pipeline()
pipe.zremrangebyscore(key, 0, window_start)
pipe.zadd(key, {str(now): now})
pipe.zcount(key, window_start, now)
pipe.expire(key, self.window)
_, _, request_count, _ = pipe.execute()
remaining = max(0, self.max_requests - request_count)
reset_time = int(now + self.window)
return request_count <= self.max_requests, remaining, reset_time
|
import time
from typing import Tuple
from redis import Redis
from .config import RATE_LIMIT_SETTINGS
class RateLimiter:
def __init__(
self,
redis_host: str = RATE_LIMIT_SETTINGS.redis_host,
redis_port: str = RATE_LIMIT_SETTINGS.redis_port,
redis_password: str = RATE_LIMIT_SETTINGS.redis_password,
requests_per_minute: int = RATE_LIMIT_SETTINGS.requests_per_minute,
):
self.redis = Redis(
host=redis_host,
port=redis_port,
password=redis_password,
decode_responses=True,
)
self.window = 60
self.max_requests = requests_per_minute
async def check_rate_limit(self, api_key_id: str) -> Tuple[bool, int, int]:
"""
Check if request is within rate limits.
Args:
api_key_id: The API key identifier to check
Returns:
Tuple of (is_allowed, remaining_requests, reset_time)
"""
now = time.time()
window_start = now - self.window
key = f"ratelimit:{api_key_id}:1min"
pipe = self.redis.pipeline()
pipe.zremrangebyscore(key, 0, window_start)
pipe.zadd(key, {str(now): now})
pipe.zcount(key, window_start, now)
pipe.expire(key, self.window)
_, _, request_count, _ = pipe.execute()
remaining = max(0, self.max_requests - request_count)
reset_time = int(now + self.window)
return request_count <= self.max_requests, remaining, reset_time
|
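A minimal sketch of driving the limiter above (assumes a reachable Redis with the configured credentials; error handling and the HTTP layer are omitted):

import asyncio

async def handle_request(limiter: RateLimiter, api_key_id: str) -> None:
    allowed, remaining, reset_time = await limiter.check_rate_limit(api_key_id)
    if not allowed:
        # A real handler would respond with HTTP 429 and a Retry-After header here.
        print(f"rate limited; window resets around {reset_time}")
    else:
        print(f"ok, {remaining} requests left in the current window")

asyncio.run(handle_request(RateLimiter(), "api-key-123"))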
# Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks.
SyncRandomSizeHook is deprecated; please use the Resize pipeline to
achieve similar behavior, e.g. `dict(type='Resize', img_scale=[(448,
448), (832, 832)], multiscale_mode='range', keep_ratio=True)`.
Note: Due to the multi-process dataloader, its behavior differs from
YOLOX's official implementation: the official version changes the size
at a fixed iteration interval, whereas this hook changes it at a fixed
epoch interval.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The epoch interval of change image size. Default: 1.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=1,
device='cuda'):
warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '
'Please use the Resize pipeline to achieve similar '
'behavior. Due to the multi-process dataloader, its '
'behavior differs from YOLOX\'s official implementation: '
'the official version changes the size at a fixed '
'iteration interval, whereas this hook uses a fixed '
'epoch interval.')
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_epoch(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.epoch +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
|
# Copyright (c) OpenMMLab. All rights reserved.
import random
import torch
from mmcv.runner import get_dist_info
from mmcv.runner.hooks import HOOKS, Hook
from torch import distributed as dist
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"""Change and synchronize the random image size across ranks, currently
used in YOLOX.
Args:
ratio_range (tuple[int]): Random ratio range. It will be multiplied
by 32, and then change the dataset output image size.
Default: (14, 26).
img_scale (tuple[int]): Size of input image. Default: (640, 640).
interval (int): The interval of change image size. Default: 10.
device (torch.device | str): device for returned tensors.
Default: 'cuda'.
"""
def __init__(self,
ratio_range=(14, 26),
img_scale=(640, 640),
interval=10,
device='cuda'):
self.rank, world_size = get_dist_info()
self.is_distributed = world_size > 1
self.ratio_range = ratio_range
self.img_scale = img_scale
self.interval = interval
self.device = device
def after_train_iter(self, runner):
"""Change the dataset output image size."""
if self.ratio_range is not None and (runner.iter +
1) % self.interval == 0:
# Due to DDP and DP get the device behavior inconsistent,
# so we did not get the device from runner.model.
tensor = torch.LongTensor(2).to(self.device)
if self.rank == 0:
size_factor = self.img_scale[1] * 1. / self.img_scale[0]
size = random.randint(*self.ratio_range)
size = (int(32 * size), 32 * int(size * size_factor))
tensor[0] = size[0]
tensor[1] = size[1]
if self.is_distributed:
dist.barrier()
dist.broadcast(tensor, 0)
runner.data_loader.dataset.update_dynamic_scale(
(tensor[0].item(), tensor[1].item()))
|
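For reference, both variants of the hook are registered through an MMDetection config entry; a hedged sketch (field values mirror the documented defaults):

# Hypothetical snippet from an MMDetection config: every rank then draws the
# same random input size, broadcast from rank 0.
custom_hooks = [
    dict(
        type='SyncRandomSizeHook',
        ratio_range=(14, 26),  # multiplied by 32 to get pixel sizes
        img_scale=(640, 640),
        device='cuda',
    )
]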
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization from the BitNet quantization method:
before weight loading, it converts the model's linear layers into BitLinear layers.
Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
else:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class BitNetHfQuantizer(HfQuantizer):
"""
1.58-bit quantization from the BitNet quantization method:
before weight loading, it converts the model's linear layers into BitLinear layers.
Check out the paper introducing this method: https://arxiv.org/pdf/2402.17764
"""
requires_parameters_quantization = False
requires_calibration = True
required_packages = ["accelerate"]
def __init__(self, quantization_config, **kwargs):
super().__init__(quantization_config, **kwargs)
self.quantization_config = quantization_config
def validate_environment(self, *args, **kwargs):
if not is_accelerate_available():
raise ImportError("Loading a BitNet quantized model requires accelerate (`pip install accelerate`)")
if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
raise ValueError(
"Loading ternary weights from tf/flax is currently not supported, please make"
" sure the weights are in PyTorch format."
)
if not torch.cuda.is_available():
logger.warning_once(
"You don't have a GPU available to load the model, the inference will be slow because of weight unpacking"
)
return
device_map = kwargs.get("device_map", None)
if device_map is None:
logger.warning_once(
"You have loaded a BitNet model on CPU and have a CUDA device available, make sure to set "
"your model on a GPU device in order to run your model."
)
else:
if isinstance(device_map, dict) and ("cpu" in device_map.values() or "disk" in device_map.values()):
raise ValueError(
"You are attempting to load a BitNet model with a device_map that contains a CPU or disk device."
"This is not supported. Please remove the CPU or disk device from the device_map."
)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
return model
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
keep_in_fp32_modules: Optional[List[str]] = None,
**kwargs,
):
from ..integrations import replace_with_bitnet_linear
self.modules_to_not_convert = self.get_modules_to_not_convert(
model, self.quantization_config.modules_to_not_convert, keep_in_fp32_modules
)
model = replace_with_bitnet_linear(
model,
modules_to_not_convert=self.modules_to_not_convert,
quantization_config=self.quantization_config,
pre_quantized=self.pre_quantized,
)
def adjust_max_memory(self, max_memory: Dict[str, Union[int, str]]) -> Dict[str, Union[int, str]]:
max_memory = {key: val * 0.90 for key, val in max_memory.items()}
return max_memory
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
target_dtype = torch.int8
return target_dtype
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
@property
def is_qat_trainable(self) -> bool:
"""Flag indicating whether the quantized model can carry out quantization aware training"""
return (
self.quantization_config.linear_class == "autobitlinear"
and self.quantization_config.quantization_mode == "online"
)
|
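Because `requires_calibration = True`, this quantizer only loads checkpoints that were quantized ahead of time; a hedged loading sketch (the repo id is a placeholder, and it is the quantization config stored in the checkpoint that selects the quantizer):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "some-org/some-bitnet-checkpoint"  # placeholder pre-quantized 1.58-bit model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda")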
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_integration_parallel(docker_compose):
# test issue reported by @florian
SHARDS = 3
with Flow().add(
uses='FaissPostgresIndexer', shards=SHARDS, uses_with={'total_shards': 3}
) as f:
f.index(Document())
|
import os
import pytest
from jina import Document, Flow
from jinahub.indexers.searcher.compound.FaissPostgresIndexer import FaissPostgresIndexer
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_integration_parallel(docker_compose):
# test issue reported by @florian
SHARDS = 3
with Flow().add(
uses='FaissPostgresIndexer', shards=SHARDS, uses_with={'total_shards': 3}
) as f:
f.index(Document())
|
import random
from datetime import datetime, timedelta
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
def _generate_random_datetime_strings(
pattern: str,
n: int = 3,
start_date: datetime = datetime(1, 1, 1),
end_date: datetime = datetime.now() + timedelta(days=3650),
) -> list[str]:
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for _ in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that used as the datetime format."""
def get_format_instructions(self) -> str:
examples = comma_list(_generate_random_datetime_strings(self.format))
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
|
import random
from datetime import datetime, timedelta
from typing import List
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
def _generate_random_datetime_strings(
pattern: str,
n: int = 3,
start_date: datetime = datetime(1, 1, 1),
end_date: datetime = datetime.now() + timedelta(days=3650),
) -> List[str]:
"""Generates n random datetime strings conforming to the
given pattern within the specified date range.
Pattern should be a string containing the desired format codes.
start_date and end_date should be datetime objects representing
the start and end of the date range.
"""
examples = []
delta = end_date - start_date
for _ in range(n):
random_delta = random.uniform(0, delta.total_seconds())
dt = start_date + timedelta(seconds=random_delta)
date_string = dt.strftime(pattern)
examples.append(date_string)
return examples
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that used as the datetime format."""
def get_format_instructions(self) -> str:
examples = comma_list(_generate_random_datetime_strings(self.format))
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
|
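A standalone sketch of the parser above (no LLM involved; the datetime string follows the class's default format):

from datetime import datetime

parser = DatetimeOutputParser()  # default format "%Y-%m-%dT%H:%M:%S.%fZ"
print(parser.get_format_instructions())  # pattern plus three random examples

parsed = parser.parse("2024-05-01T12:30:00.000000Z")
assert parsed == datetime(2024, 5, 1, 12, 30)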
import warnings
from typing import Any, List, Union
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `to_dtype(..., dtype=torch.float32, scale=True)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
import warnings
from typing import Any, List, Union
import torch
from torchvision import datapoints
from torchvision.transforms import functional as _F
@torch.jit.unused
def to_tensor(inpt: Any) -> torch.Tensor:
warnings.warn(
"The function `to_tensor(...)` is deprecated and will be removed in a future release. "
"Instead, please use `to_image_tensor(...)` followed by `convert_image_dtype(...)`."
)
return _F.to_tensor(inpt)
def get_image_size(inpt: Union[datapoints._ImageTypeJIT, datapoints._VideoTypeJIT]) -> List[int]:
warnings.warn(
"The function `get_image_size(...)` is deprecated and will be removed in a future release. "
"Instead, please use `get_spatial_size(...)` which returns `[h, w]` instead of `[w, h]`."
)
return _F.get_image_size(inpt)
|
_base_ = './yolox_s_8xb8-300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=1.0, widen_factor=1.0),
neck=dict(
in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3),
bbox_head=dict(in_channels=256, feat_channels=256))
|
_base_ = './yolox_s_8x8_300e_coco.py'
# model settings
model = dict(
backbone=dict(deepen_factor=1.0, widen_factor=1.0),
neck=dict(
in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3),
bbox_head=dict(in_channels=256, feat_channels=256))
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_true(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(autouse=True)
def set_test_pip_version() -> None:
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ:  # maybe another fixture has already removed it
del os.environ['JINA_GATEWAY_IMAGE']
|
import asyncio
import os
import random
import string
import tempfile
import time
import pytest
from jina import helper
@pytest.fixture(scope='function')
def random_workspace_name():
"""Generate a random workspace name with digits and letters."""
rand = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
return f'JINA_TEST_WORKSPACE_{rand}'
@pytest.fixture(scope='function')
def test_metas(tmpdir, random_workspace_name):
from jina.serve.executors.metas import get_default_metas
os.environ[random_workspace_name] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ[random_workspace_name]
yield metas
del os.environ[random_workspace_name]
@pytest.fixture()
def docker_compose(request):
os.system(
f"docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans"
)
time.sleep(10)
yield
os.system(
f"docker-compose -f {request.param} --project-directory . down --remove-orphans"
)
@pytest.fixture(scope='function')
def port_generator():
generated_ports = set()
def random_port():
port = helper.random_port()
while port in generated_ports:
port = helper.random_port()
generated_ports.add(port)
return port
return random_port
@pytest.fixture(autouse=True)
def test_log_level(monkeypatch):
monkeypatch.setenv('JINA_LOG_LEVEL', 'DEBUG')
@pytest.fixture(autouse=True)
def test_grpc_fork_support_false(monkeypatch):
monkeypatch.setenv('GRPC_ENABLE_FORK_SUPPORT', 'true')
@pytest.fixture(autouse=True)
def test_timeout_ctrl_time(monkeypatch):
monkeypatch.setenv('JINA_DEFAULT_TIMEOUT_CTRL', '500')
@pytest.fixture(autouse=True)
def test_disable_telemetry(monkeypatch):
monkeypatch.setenv('JINA_OPTOUT_TELEMETRY', 'True')
@pytest.fixture(autouse=True)
def tmpfile(tmpdir):
tmpfile = f'jina_test_{next(tempfile._get_candidate_names())}.db'
return tmpdir / tmpfile
@pytest.fixture(scope='session')
def event_loop(request):
"""
Valid only for `pytest.mark.asyncio` tests
"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
@pytest.fixture(autouse=True)
def set_test_pip_version() -> None:
os.environ['JINA_GATEWAY_IMAGE'] = 'jinaai/jina:test-pip'
yield
if 'JINA_GATEWAY_IMAGE' in os.environ:  # maybe another fixture has already removed it
del os.environ['JINA_GATEWAY_IMAGE']
|
import re
import sys
file_name = sys.argv[1]
with open(file_name, 'r', encoding='utf-8') as f:
input = f.read()
# official semver regex: https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
versions_regex = r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
output = re.sub(
f'(?P<dep>[a-zA-Z0-9]+)(==|>=)(?P<version>{versions_regex}).*:',
r'\g<dep>==\g<version>:',
input,
)
with open(file_name, 'w', encoding='utf-8') as f:
f.write(output)
|
import re
import sys
file_name = sys.argv[1]
with open(file_name, 'r') as f:
input = f.read()
# official semver regex: https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
versions_regex = r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
output = re.sub(
f'(?P<dep>[a-zA-Z0-9]+)(==|>=)(?P<version>{versions_regex}).*:',
r'\g<dep>==\g<version>:',
input,
)
with open(file_name, 'w') as f:
f.write(output)
|
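To make the rewrite concrete, a tiny self-contained demo of the same substitution (the sample requirements-style line is made up):

import re

versions_regex = r'(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)'
sample = 'numpy>=1.21.0.post1:'
print(re.sub(
    f'(?P<dep>[a-zA-Z0-9]+)(==|>=)(?P<version>{versions_regex}).*:',
    r'\g<dep>==\g<version>:',
    sample,
))  # -> numpy==1.21.0: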
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfCudaSmallMemory,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfCudaSmallMemory",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
from .backend_utils import set_audio_backend
from .case_utils import (
HttpServerMixin,
is_ffmpeg_available,
PytorchTestCase,
skipIfNoCtcDecoder,
skipIfNoCuda,
skipIfNoExec,
skipIfNoFFmpeg,
skipIfNoKaldi,
skipIfNoModule,
skipIfNoQengine,
skipIfNoSox,
skipIfPy310,
skipIfRocm,
TempDirMixin,
TestBaseMixin,
TorchaudioTestCase,
zip_equal,
)
from .data_utils import get_asset_path, get_sinusoid, get_spectrogram, get_whitenoise
from .func_utils import torch_script
from .image_utils import get_image, rgb_to_gray, rgb_to_yuv_ccir, save_image
from .parameterized_utils import load_params, nested_params
from .wav_utils import get_wav_data, load_wav, normalize_wav, save_wav
__all__ = [
"get_asset_path",
"get_whitenoise",
"get_sinusoid",
"get_spectrogram",
"set_audio_backend",
"TempDirMixin",
"HttpServerMixin",
"TestBaseMixin",
"PytorchTestCase",
"TorchaudioTestCase",
"is_ffmpeg_available",
"skipIfNoCtcDecoder",
"skipIfNoCuda",
"skipIfNoExec",
"skipIfNoModule",
"skipIfNoKaldi",
"skipIfNoSox",
"skipIfNoSoxBackend",
"skipIfRocm",
"skipIfNoQengine",
"skipIfNoFFmpeg",
"skipIfPy310",
"get_wav_data",
"normalize_wav",
"load_wav",
"save_wav",
"load_params",
"nested_params",
"torch_script",
"save_image",
"get_image",
"rgb_to_gray",
"rgb_to_yuv_ccir",
"zip_equal",
]
|
import os
import torchaudio
import torchvision
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(args.root_dir, "labels", filename)
for line in open(filepath).read().splitlines():
dataset, rel_path, input_length = line.split(",")[0], line.split(",")[1], line.split(",")[2]
path = os.path.normpath(os.path.join(args.root_dir, dataset, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
rtype: torch, T x C x H x W
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
rtype: torch, T x 1
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, modality):
if modality == "video":
return (load_video(path), load_transcript(path))
if modality == "audio":
return (load_audio(path), load_transcript(path))
if modality == "audiovisual":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self.files, self.lengths = _load_list(self.args, "lrs3_train_transcript_lengths_seg16s.csv")
if subset == "val":
self.files, self.lengths = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
if subset == "test":
self.files, self.lengths = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self.files[n]
return load_item(path, self.args.modality)
def __len__(self) -> int:
return len(self.files)
|
import os
import torchaudio
import torchvision
from torch.utils.data import Dataset
def _load_list(args, *filenames):
output = []
length = []
for filename in filenames:
filepath = os.path.join(args.root_dir, "labels", filename)
for line in open(filepath).read().splitlines():
dataset, rel_path, input_length = line.split(",")[0], line.split(",")[1], line.split(",")[2]
path = os.path.normpath(os.path.join(args.root_dir, dataset, rel_path[:-4] + ".mp4"))
length.append(int(input_length))
output.append(path)
return output, length
def load_video(path):
"""
rtype: torch, T x C x H x W
"""
vid = torchvision.io.read_video(path, pts_unit="sec", output_format="THWC")[0]
vid = vid.permute((0, 3, 1, 2))
return vid
def load_audio(path):
"""
rtype: torch, T x 1
"""
waveform, sample_rate = torchaudio.load(path, normalize=True)
return waveform.transpose(1, 0)
def load_transcript(path):
transcript_path = path.replace("video_seg", "text_seg")[:-4] + ".txt"
return open(transcript_path).read().splitlines()[0]
def load_item(path, md):
if md == "v":
return (load_video(path), load_transcript(path))
if md == "a":
return (load_audio(path), load_transcript(path))
if md == "av":
return (load_audio(path), load_video(path), load_transcript(path))
class LRS3(Dataset):
def __init__(
self,
args,
subset: str = "train",
) -> None:
if subset is not None and subset not in ["train", "val", "test"]:
raise ValueError("When `subset` is not None, it must be one of ['train', 'val', 'test'].")
self.args = args
if subset == "train":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_train_transcript_lengths_seg16s.csv")
if subset == "val":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
if subset == "test":
self._filelist, self._lengthlist = _load_list(self.args, "lrs3_test_transcript_lengths_seg16s.csv")
def __getitem__(self, n):
path = self._filelist[n]
return load_item(path, self.args.md)
def __len__(self) -> int:
return len(self._filelist)
|
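A minimal sketch of constructing the first variant of the dataset above (the `args` namespace, directory layout, and label CSVs are assumptions):

from types import SimpleNamespace

# Hypothetical args object exposing the attributes the dataset reads
# (`args.root_dir` and `args.modality`).
args = SimpleNamespace(root_dir='/data/lrs3', modality='video')
dataset = LRS3(args, subset='train')
video, transcript = dataset[0]  # T x C x H x W tensor and its transcript string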
from typing import Optional
from typing_extensions import TypeAlias
import torch
from torch import Tensor
from torch.autograd.grad_mode import no_grad
def _get_foreach_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports foreach kernels."""
return ["cuda", "xpu", "mtia", torch._C._get_privateuse1_backend_name()]
def _get_fused_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports fused kernels in optimizer."""
return [
"mps",
"cuda",
"xpu",
"hpu",
"cpu",
"mtia",
torch._C._get_privateuse1_backend_name(),
]
TensorListList: TypeAlias = list[list[Optional[Tensor]]]
Indices: TypeAlias = list[int]
_foreach_supported_types = [torch.Tensor]
# This util function splits tensors into groups by device and dtype, which is useful before sending
# tensors off to a foreach implementation, which requires tensors to be on one device and dtype.
# If tensorlistlist contains more than one tensorlist, the following assumptions are made BUT NOT verified:
# - tensorlists CAN be None
# - all tensors in the first specified list cannot be None
# - given an index i, all specified tensorlist[i]s match in dtype and device
# with_indices (bool, optional): whether to track previous indices as the last list per dictionary entry.
# It comes in handy if there are Nones or literals in the tensorlists that are getting scattered out.
# Whereas mutating a tensor in the resulting split-up tensorlists WILL propagate changes back to the
# original input tensorlists, changing up Nones/literals WILL NOT propagate, and manual propagation
# may be necessary. Check out torch/optim/sgd.py for an example.
@no_grad()
def _group_tensors_by_device_and_dtype(
tensorlistlist: TensorListList,
with_indices: bool = False,
) -> dict[tuple[torch.device, torch.dtype], tuple[TensorListList, Indices]]:
return torch._C._group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
def _device_has_foreach_support(device: torch.device) -> bool:
return (
device.type in (_get_foreach_kernels_supported_devices() + ["cpu"])
and not torch.jit.is_scripting()
)
def _has_foreach_support(tensors: list[Tensor], device: torch.device) -> bool:
return _device_has_foreach_support(device) and all(
t is None or type(t) in _foreach_supported_types for t in tensors
)
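# --- Usage sketch ---
# A minimal example of grouping two parallel tensor lists (e.g. params and grads)
# by (device, dtype) before dispatching to a foreach kernel. Shapes and values
# below are illustrative only.
if __name__ == "__main__":
    params = [torch.zeros(3), torch.zeros(3, dtype=torch.float64)]
    grads = [torch.ones_like(p) for p in params]
    grouped = _group_tensors_by_device_and_dtype([params, grads], with_indices=True)
    for (device, dtype), (tensorlists, indices) in grouped.items():
        # tensorlists is [params_subset, grads_subset]; indices maps each entry
        # back to its position in the original lists.
        print(device, dtype, indices, [len(tl) for tl in tensorlists])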
|
from typing import Optional
from typing_extensions import TypeAlias
import torch
from torch import Tensor
from torch.autograd.grad_mode import no_grad
def _get_foreach_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports foreach kernels."""
return ["cuda", "xpu", torch._C._get_privateuse1_backend_name()]
def _get_fused_kernels_supported_devices() -> list[str]:
r"""Return the device type list that supports fused kernels in optimizer."""
return [
"mps",
"cuda",
"xpu",
"hpu",
"cpu",
torch._C._get_privateuse1_backend_name(),
]
TensorListList: TypeAlias = list[list[Optional[Tensor]]]
Indices: TypeAlias = list[int]
_foreach_supported_types = [torch.Tensor]
# This util function splits tensors into groups by device and dtype, which is useful before sending
# tensors off to a foreach implementation, which requires tensors to be on one device and dtype.
# If tensorlistlist contains more than one tensorlist, the following assumptions are made BUT NOT verified:
# - tensorlists CAN be None
# - all tensors in the first specified list cannot be None
# - given an index i, all specified tensorlist[i]s match in dtype and device
# with_indices (bool, optional): whether to track previous indices as the last list per dictionary entry.
# It comes in handy if there are Nones or literals in the tensorlists that are getting scattered out.
# Whereas mutating a tensor in the resulting split-up tensorlists WILL propagate changes back to the
# original input tensorlists, changing up Nones/literals WILL NOT propagate, and manual propagation
# may be necessary. Check out torch/optim/sgd.py for an example.
@no_grad()
def _group_tensors_by_device_and_dtype(
tensorlistlist: TensorListList,
with_indices: bool = False,
) -> dict[tuple[torch.device, torch.dtype], tuple[TensorListList, Indices]]:
return torch._C._group_tensors_by_device_and_dtype(tensorlistlist, with_indices)
def _device_has_foreach_support(device: torch.device) -> bool:
return (
device.type in (_get_foreach_kernels_supported_devices() + ["cpu"])
and not torch.jit.is_scripting()
)
def _has_foreach_support(tensors: list[Tensor], device: torch.device) -> bool:
return _device_has_foreach_support(device) and all(
t is None or type(t) in _foreach_supported_types for t in tensors
)
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
            v['default'] = 'random in [49152, 65535]'  # avoid random numbers causing devbot to commit forever
table.append(f'| `{k}` | {desc} | `{v["type"]}` | `{v["default"]}` |')
with open(f'../docs/concepts/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
from jina.schemas.helper import _cli_to_schema
from jina_cli.export import api_to_dict
for s in ('flow', 'gateway', 'executor'):
a = _cli_to_schema(api_to_dict(), s)
table = ['| Name | Description | Type | Default |', '|----|----|----|----|']
for k, v in a[f'Jina::{s.capitalize()}']['properties'].items():
desc = v["description"].replace("\n", "<br>")
if k in ('port', 'port_monitoring'):
            v['default'] = 'random in [49152, 65535]'  # avoid random numbers causing devbot to commit forever
table.append(f'| `{k}` | {desc} | `{v["type"]}` | `{v["default"]}` |')
with open(f'../docs/fundamentals/flow/{s}-args.md', 'w') as fp:
fp.write('\n'.join(table))
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.9.10",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.1.5",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
}
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
_proto_type_name: str
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
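# --- Usage sketch ---
# A hypothetical concrete subclass, to illustrate the contract above (pydantic v1
# style). The class name is made up, and ``NodeProto(text=..., type=...)`` is an
# assumption about the proto schema; adapt it to the actual ``NodeProto`` definition.
class MyStrType(str, AbstractType):
    _proto_type_name = 'my_str_type'

    @classmethod
    def validate(cls, value, field, config):
        # Coerce any incoming value to this string-based type.
        return cls(str(value))

    @classmethod
    def from_protobuf(cls, pb_msg):
        return cls(pb_msg)

    def _to_node_protobuf(self) -> 'NodeProto':
        from docarray.proto import NodeProto

        return NodeProto(text=str(self), type=self._proto_type_name)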
|
from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Type, TypeVar
from pydantic import BaseConfig
from pydantic.fields import ModelField
from docarray.base_document.base_node import BaseNode
if TYPE_CHECKING:
from docarray.proto import NodeProto
T = TypeVar('T')
class AbstractType(BaseNode):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
@abstractmethod
def validate(
cls: Type[T],
value: Any,
field: 'ModelField',
config: 'BaseConfig',
) -> T:
...
@classmethod
@abstractmethod
def from_protobuf(cls: Type[T], pb_msg: T) -> T:
...
@abstractmethod
def _to_node_protobuf(self: T) -> 'NodeProto':
...
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .multi_instance_random_sampler import MultiInsRandomSampler
from .multi_instance_sampling_result import MultiInstanceSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult', 'MultiInstanceSamplingResult',
'MultiInsRandomSampler'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base_sampler import BaseSampler
from .combined_sampler import CombinedSampler
from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
from .iou_balanced_neg_sampler import IoUBalancedNegSampler
from .mask_pseudo_sampler import MaskPseudoSampler
from .mask_sampling_result import MaskSamplingResult
from .ohem_sampler import OHEMSampler
from .pseudo_sampler import PseudoSampler
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from .score_hlr_sampler import ScoreHLRSampler
__all__ = [
'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',
'MaskSamplingResult'
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import CollegeConfidentialLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollegeConfidentialLoader": "langchain_community.document_loaders",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollegeConfidentialLoader",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import CollegeConfidentialLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"CollegeConfidentialLoader": "langchain_community.document_loaders"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"CollegeConfidentialLoader",
]
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from pydantic import Field
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl] = Field(
description='URL to a (potentially remote) video file that needs to be loaded',
example='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
default=None,
)
audio: Optional[AudioDoc] = Field(
description='Audio document associated with the video',
default=None,
)
tensor: Optional[VideoTensor] = Field(
        description='Tensor object representing the video, which can be specified as one of `VideoNdArray`, `VideoTorchTensor`, or `VideoTensorFlowTensor`',
default=None,
)
key_frame_indices: Optional[AnyTensor] = Field(
        description='Indices of the key frames in the video',
example=[0, 1, 2, 3, 4],
default=None,
)
embedding: Optional[AnyEmbedding] = Field(
description='Store an embedding: a vector representation of the video',
example=[1, 0, 1],
default=None,
)
bytes_: Optional[VideoBytes] = Field(
description='Bytes representation of the video',
default=None,
)
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
from typing import TYPE_CHECKING, Any, Optional, Type, TypeVar, Union
import numpy as np
from docarray.base_doc import BaseDoc
from docarray.documents import AudioDoc
from docarray.typing import AnyEmbedding, AnyTensor, VideoBytes
from docarray.typing.tensor.abstract_tensor import AbstractTensor
from docarray.typing.tensor.video.video_tensor import VideoTensor
from docarray.typing.url.video_url import VideoUrl
from docarray.utils._internal.misc import import_library
if TYPE_CHECKING:
import tensorflow as tf # type: ignore
import torch
else:
tf = import_library('tensorflow', raise_error=False)
torch = import_library('torch', raise_error=False)
T = TypeVar('T', bound='VideoDoc')
class VideoDoc(BaseDoc):
"""
Document for handling video.
The Video Document can contain:
- a [`VideoUrl`][docarray.typing.url.VideoUrl] (`VideoDoc.url`)
- an [`AudioDoc`][docarray.documents.AudioDoc] (`VideoDoc.audio`)
- a [`VideoTensor`](../../../api_references/typing/tensor/video) (`VideoDoc.tensor`)
- an [`AnyTensor`](../../../api_references/typing/tensor/tensor) representing the indices of the video's key frames (`VideoDoc.key_frame_indices`)
- an [`AnyEmbedding`](../../../api_references/typing/tensor/embedding) (`VideoDoc.embedding`)
- a [`VideoBytes`][docarray.typing.bytes.VideoBytes] object (`VideoDoc.bytes_`)
You can use this Document directly:
```python
from docarray.documents import VideoDoc
# use it directly
vid = VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
# model = MyEmbeddingModel()
# vid.embedding = model(vid.tensor)
```
You can extend this Document:
```python
from typing import Optional
from docarray.documents import TextDoc, VideoDoc
# extend it
class MyVideo(VideoDoc):
name: Optional[TextDoc]
video = MyVideo(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
video.name = TextDoc(text='my first video')
video.tensor = video.url.load().video
# model = MyEmbeddingModel()
# video.embedding = model(video.tensor)
```
You can use this Document for composition:
```python
from docarray import BaseDoc
from docarray.documents import TextDoc, VideoDoc
# compose it
class MultiModalDoc(BaseDoc):
video: VideoDoc
text: TextDoc
mmdoc = MultiModalDoc(
video=VideoDoc(
url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
),
text=TextDoc(text='hello world, how are you doing?'),
)
mmdoc.video.tensor = mmdoc.video.url.load().video
# or
mmdoc.video.bytes_ = mmdoc.video.url.load_bytes()
mmdoc.video.tensor = mmdoc.video.bytes_.load().video
```
"""
url: Optional[VideoUrl] = None
audio: Optional[AudioDoc] = AudioDoc()
tensor: Optional[VideoTensor] = None
key_frame_indices: Optional[AnyTensor] = None
embedding: Optional[AnyEmbedding] = None
bytes_: Optional[VideoBytes] = None
@classmethod
def validate(
cls: Type[T],
value: Union[str, AbstractTensor, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
elif isinstance(value, (AbstractTensor, np.ndarray)) or (
torch is not None
and isinstance(value, torch.Tensor)
or (tf is not None and isinstance(value, tf.Tensor))
):
value = cls(tensor=value)
return super().validate(value)
|
import logging
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for all NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = evaluator(model)
# Print the results
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
|
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseNanoBEIREvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Create evaluator for all NanoBEIR datasets
evaluator = SparseNanoBEIREvaluator(
dataset_names=None, # None means evaluate on all datasets
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation on all NanoBEIR datasets")
results = evaluator(model)
print(f"Primary metric: {evaluator.primary_metric}")
print(f"Primary metric value: {results[evaluator.primary_metric]:.4f}")
# Print results for each dataset
for key, value in results.items():
if key.startswith("Nano"):
print(f"{key}: {value:.4f}")
|
import numpy as np
from docarray import DocumentArray, Document, dataclass
from docarray.typing import Text
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params_awaited = params_awaited
@requests
def process(self, docs, parameters, **kwargs):
for doc in docs:
doc.tags['assert'] = parameters == self.params_awaited
flow = (
Flow()
.add(uses=MyExec, name='exec1', uses_with={'params_awaited': {'key_1': True}})
.add(
uses=MyExec,
name='exec2',
uses_with={'params_awaited': {'key_1': True, 'key_2': False}},
)
)
with flow:
docs = flow.index(
DocumentArray.empty(size=1),
parameters={'key_1': True, 'exec2__key_2': False},
)
assert docs[0].tags['assert']
def test_specific_params_with_branched_flow():
class TextEncoderTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda t: np.random.rand(
len(t), 128
) # initialize dummy text embedding model
@requests(on='/encode')
def encode_text(self, docs, parameters, **kwargs):
path = parameters.get('access_path', None)
text_docs = docs[path]
embeddings = self.model(text_docs[:, 'text'])
text_docs.embeddings = embeddings
class EmbeddingCombinerTestSpecific(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = lambda emb1, emb2: np.concatenate(
[emb1, emb2], axis=1
) # initialize dummy model to combine embeddings
@requests(on='/encode')
def combine(self, docs, parameters, **kwargs):
text1_path = parameters.get('text1_access_path', None)
text2_path = parameters.get('text2_access_path', None)
assert text1_path == '@.[text1]'
assert text2_path == '@.[text2]'
text1_docs = docs[text1_path]
text2_docs = docs[text2_path]
combined_embeddings = self.model(text1_docs.embeddings, text2_docs.embeddings)
docs.embeddings = combined_embeddings
@dataclass
class MMDoc:
text1: Text
text2: Text
mmdoc_dataclass = MMDoc(text1='text 1', text2='text 2')
da = DocumentArray([Document(mmdoc_dataclass)])
f = (
Flow()
.add(uses=TextEncoderTestSpecific, name='Text1Encoder')
.add(uses=TextEncoderTestSpecific, name='Text2Encoder', needs='gateway')
.add(uses=EmbeddingCombinerTestSpecific, name='Combiner', needs=['Text1Encoder', 'Text2Encoder'])
)
with f:
da = f.post(
inputs=da,
on='/encode',
parameters={
'Text1Encoder__access_path': '@.[text1]',
'Text2Encoder__access_path': '@.[text2]',
'Combiner__text1_access_path': '@.[text1]',
'Combiner__text2_access_path': '@.[text2]',
},
)
assert len(da) == 1
for d in da:
assert d.embedding.shape == (256, )
|
import copy
from docarray import DocumentArray
from jina import Executor, Flow, requests
def test_specific_params():
class MyExec(Executor):
def __init__(self, params_awaited, *args, **kwargs):
super().__init__(*args, **kwargs)
self.params_awaited = params_awaited
@requests
def process(self, docs, parameters, **kwargs):
docs[0].tags['assert'] = parameters == self.params_awaited
flow = (
Flow()
.add(uses=MyExec, name='exec1', uses_with={'params_awaited': {'key_1': True}})
.add(
uses=MyExec,
name='exec2',
uses_with={'params_awaited': {'key_1': True, 'key_2': False}},
)
)
with flow:
docs = flow.index(
DocumentArray.empty(size=1),
parameters={'key_1': True, 'exec2__key_2': False},
)
assert docs[0].tags['assert']
|
import json
import os
from typing import List, Optional
import torch
from torch import nn
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
        stride_sizes: Optional[List[int]] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
model = CNN(**config)
model.load_state_dict(weights)
return model
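# --- Usage sketch ---
# A minimal forward pass with dummy token embeddings of shape
# (batch, seq_len, embedding_dim); the numbers are illustrative only.
if __name__ == "__main__":
    cnn = CNN(in_word_embedding_dimension=300, out_channels=256, kernel_sizes=[1, 3, 5])
    features = {"token_embeddings": torch.randn(2, 16, 300)}
    out = cnn(features)["token_embeddings"]
    # Each kernel size contributes ``out_channels`` channels, concatenated:
    print(out.shape, cnn.get_word_embedding_dimension())  # torch.Size([2, 16, 768]) 768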
|
import torch
from torch import nn
from typing import List, Optional
import os
import json
class CNN(nn.Module):
"""CNN-layer with multiple kernel-sizes over the word embeddings"""
def __init__(
self,
in_word_embedding_dimension: int,
out_channels: int = 256,
kernel_sizes: List[int] = [1, 3, 5],
        stride_sizes: Optional[List[int]] = None,
):
nn.Module.__init__(self)
self.config_keys = ["in_word_embedding_dimension", "out_channels", "kernel_sizes"]
self.in_word_embedding_dimension = in_word_embedding_dimension
self.out_channels = out_channels
self.kernel_sizes = kernel_sizes
self.embeddings_dimension = out_channels * len(kernel_sizes)
self.convs = nn.ModuleList()
in_channels = in_word_embedding_dimension
if stride_sizes is None:
stride_sizes = [1] * len(kernel_sizes)
for kernel_size, stride in zip(kernel_sizes, stride_sizes):
padding_size = int((kernel_size - 1) / 2)
conv = nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding_size,
)
self.convs.append(conv)
def forward(self, features):
token_embeddings = features["token_embeddings"]
token_embeddings = token_embeddings.transpose(1, -1)
vectors = [conv(token_embeddings) for conv in self.convs]
out = torch.cat(vectors, 1).transpose(1, -1)
features.update({"token_embeddings": out})
return features
def get_word_embedding_dimension(self) -> int:
return self.embeddings_dimension
def tokenize(self, text: str, **kwargs) -> List[int]:
raise NotImplementedError()
def save(self, output_path: str):
with open(os.path.join(output_path, "cnn_config.json"), "w") as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
torch.save(self.state_dict(), os.path.join(output_path, "pytorch_model.bin"))
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
@staticmethod
def load(input_path: str):
with open(os.path.join(input_path, "cnn_config.json"), "r") as fIn:
config = json.load(fIn)
weights = torch.load(os.path.join(input_path, "pytorch_model.bin"), map_location=torch.device("cpu"))
model = CNN(**config)
model.load_state_dict(weights)
return model
|
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import json
import os
from collections import Counter
from copy import deepcopy
from get_previous_daily_ci import get_last_daily_ci_run
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
with open("ci_results_run_models_gpu/model_job_links.json") as fp:
model_job_links = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
# prepare job_link and add it to each entry of new failed test information.
                # strip the `-gpu` suffix (`single-gpu` -> `single`, `multi-gpu` -> `multi`) so the key matches `job_link`.
job_link = model_job_links[model][device.replace("-gpu", "")]
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
for x in failed_tests:
x.update({"job_link": job_link})
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
# if it is not a scheduled run, upload the reports to a subfolder under `report_repo_folder`
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"
workflow_run = get_last_daily_ci_run(
token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv("GITHUB_RUN_ID")
)
workflow_run_created_time = workflow_run["created_at"]
report_repo_folder = workflow_run_created_time.split("T")[0]
if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
|
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import datetime
import json
import os
from collections import Counter
from copy import deepcopy
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
with open("ci_results_run_models_gpu/model_job_links.json") as fp:
model_job_links = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
# prepare job_link and add it to each entry of new failed test information.
                # strip the `-gpu` suffix (`single-gpu` -> `single`, `multi-gpu` -> `multi`) so the key matches `job_link`.
job_link = model_job_links[model][device.replace("-gpu", "")]
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
for x in failed_tests:
x.update({"job_link": job_link})
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* :cite:`cosentino2020librimix` dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
which subdirectory the audio are fetched. If any of the audio has a different sample
rate, raises ``ValueError``. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
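# --- Usage sketch ---
# A minimal example, assuming a locally generated LibriMix tree (the root path
# below is hypothetical; see the note in the class docstring).
if __name__ == "__main__":
    dataset = LibriMix("/data/librimix", subset="dev", num_speakers=2, sample_rate=8000)
    sample_rate, mixture, sources = dataset[0]
    print(sample_rate, mixture.shape, [s.shape for s in sources])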
|
from pathlib import Path
from typing import List, Tuple, Union
import torch
import torchaudio
from torch.utils.data import Dataset
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class LibriMix(Dataset):
r"""Create the *LibriMix* [:footcite:`cosentino2020librimix`] dataset.
Args:
root (str or Path): The path to the directory where the directory ``Libri2Mix`` or
``Libri3Mix`` is stored.
subset (str, optional): The subset to use. Options: [``train-360``, ``train-100``,
``dev``, and ``test``] (Default: ``train-360``).
num_speakers (int, optional): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios. (Default: 2)
sample_rate (int, optional): sample rate of audio files. The ``sample_rate`` determines
which subdirectory the audio are fetched. If any of the audio has a different sample
rate, raises ``ValueError``. Options: [8000, 16000] (Default: 8000)
task (str, optional): the task of LibriMix.
Options: [``enh_single``, ``enh_both``, ``sep_clean``, ``sep_noisy``]
(Default: ``sep_clean``)
Note:
The LibriMix dataset needs to be manually generated. Please check https://github.com/JorisCos/LibriMix
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train-360",
num_speakers: int = 2,
sample_rate: int = 8000,
task: str = "sep_clean",
):
self.root = Path(root) / f"Libri{num_speakers}Mix"
if sample_rate == 8000:
self.root = self.root / "wav8k/min" / subset
elif sample_rate == 16000:
self.root = self.root / "wav16k/min" / subset
else:
raise ValueError(f"Unsupported sample rate. Found {sample_rate}.")
self.sample_rate = sample_rate
self.task = task
self.mix_dir = (self.root / f"mix_{task.split('_')[1]}").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob("*wav")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}")
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
(int, Tensor, List[Tensor]): ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if set(translation_dict) == {"language", "translation"}:
return translation_dict
elif self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
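# --- Usage sketch ---
# A minimal example of ``encode_example``: multiple translations for one
# language are split out into aligned (language, translation) lists.
if __name__ == "__main__":
    feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
    encoded = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
    print(encoded)
    # {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}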
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
"""`FeatureConnector` for translations with fixed languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to string translations.
Example:
```python
>>> # At construction time:
>>> datasets.features.Translation(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
... 'fr': 'le chat',
... 'de': 'die katze'
... }
```
"""
languages: List[str]
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="Translation", init=False, repr=False)
def __call__(self):
return pa.struct({lang: pa.string() for lang in sorted(self.languages)})
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the Translation feature into a dictionary."""
from .features import Value
return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
"""`FeatureConnector` for translations with variable languages per example.
    Here for compatibility with tfds.
Args:
languages (`dict`):
A dictionary for each example mapping string language codes to one or more string translations.
The languages present may vary from example to example.
Returns:
- `language` or `translation` (variable-length 1D `tf.Tensor` of `tf.string`):
Language codes sorted in ascending order or plain text translations, sorted to align with language codes.
Example:
```python
>>> # At construction time:
>>> datasets.features.TranslationVariableLanguages(languages=['en', 'fr', 'de'])
>>> # During data generation:
>>> yield {
... 'en': 'the cat',
    ...     'fr': ['le chat', 'la chatte'],
... 'de': 'die katze'
... }
>>> # Tensor returned :
>>> {
... 'language': ['en', 'de', 'fr', 'fr'],
... 'translation': ['the cat', 'die katze', 'la chatte', 'le chat'],
... }
```
"""
languages: Optional[List] = None
num_languages: Optional[int] = None
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "dict"
pa_type: ClassVar[Any] = None
_type: str = field(default="TranslationVariableLanguages", init=False, repr=False)
def __post_init__(self):
self.languages = sorted(set(self.languages)) if self.languages else None
self.num_languages = len(self.languages) if self.languages else None
def __call__(self):
return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})
def encode_example(self, translation_dict):
lang_set = set(self.languages)
if self.languages and set(translation_dict) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
)
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
translation_tuples = []
for lang, text in translation_dict.items():
if isinstance(text, str):
translation_tuples.append((lang, text))
else:
translation_tuples.extend([(lang, el) for el in text])
# Ensure translations are in ascending order by language code.
languages, translations = zip(*sorted(translation_tuples))
return {"language": languages, "translation": translations}
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""Flatten the TranslationVariableLanguages feature into a dictionary."""
from .features import Sequence, Value
return {
"language": Sequence(Value("string")),
"translation": Sequence(Value("string")),
}
|
# pylint: disable=too-many-locals
"""Tests for learning to rank."""
from types import ModuleType
from typing import Any
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
def run_ranking_qid_df(impl: ModuleType, tree_method: str) -> None:
"""Test ranking with qid packed into X."""
import scipy.sparse
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedGroupKFold, cross_val_score
X, y, q, _ = tm.make_ltr(n_samples=128, n_features=2, n_query_groups=8, max_rel=3)
    # pack qid into X using a dataframe
df = impl.DataFrame(X)
df["qid"] = q
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(df, y)
s = ranker.score(df, y)
assert s > 0.7
# works with validation datasets as well
valid_df = df.copy()
valid_df.iloc[0, 0] = 3.0
ranker.fit(df, y, eval_set=[(valid_df, y)])
# same as passing qid directly
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(X, y, qid=q)
s1 = ranker.score(df, y)
assert np.isclose(s, s1)
# Works with standard sklearn cv
if tree_method != "gpu_hist":
# we need cuML for this.
kfold = StratifiedGroupKFold(shuffle=False)
results = cross_val_score(ranker, df, y, cv=kfold, groups=df.qid)
assert len(results) == 5
# Works with custom metric
def neg_mse(*args: Any, **kwargs: Any) -> float:
return -float(mean_squared_error(*args, **kwargs))
ranker = xgb.XGBRanker(
n_estimators=3,
eval_metric=neg_mse,
tree_method=tree_method,
disable_default_eval_metric=True,
)
ranker.fit(df, y, eval_set=[(valid_df, y)])
score = ranker.score(valid_df, y)
assert np.isclose(score, ranker.evals_result()["validation_0"]["neg_mse"][-1])
# Works with sparse data
if tree_method != "gpu_hist":
# no sparse with cuDF
X_csr = scipy.sparse.csr_matrix(X)
df = impl.DataFrame.sparse.from_spmatrix(
X_csr, columns=[str(i) for i in range(X.shape[1])]
)
df["qid"] = q
ranker = xgb.XGBRanker(
n_estimators=3, eval_metric="ndcg", tree_method=tree_method
)
ranker.fit(df, y)
s2 = ranker.score(df, y)
assert np.isclose(s2, s)
with pytest.raises(ValueError, match="Either `group` or `qid`."):
ranker.fit(df, y, eval_set=[(X, y)])
def run_ranking_categorical(device: str) -> None:
"""Test LTR with categorical features."""
from sklearn.model_selection import cross_val_score
X, y = tm.make_categorical(
n_samples=512, n_features=10, n_categories=3, onehot=False
)
rng = np.random.default_rng(1994)
qid = rng.choice(3, size=y.shape[0])
qid = np.sort(qid)
X["qid"] = qid
ltr = xgb.XGBRanker(enable_categorical=True, device=device)
ltr.fit(X, y)
score = ltr.score(X, y)
assert score > 0.9
ltr = xgb.XGBRanker(enable_categorical=True, device=device)
# test using the score function inside sklearn.
scores = cross_val_score(ltr, X, y)
for s in scores:
assert s > 0.7
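# --- Usage sketch ---
# A standalone illustration of the qid-packed-into-X pattern exercised above:
# XGBRanker picks up a ``qid`` column from the DataFrame during fit. The data
# below is random and purely illustrative (pandas assumed).
if __name__ == "__main__":
    import pandas as pd

    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(16, 2)), columns=["f0", "f1"])
    X["qid"] = np.sort(rng.choice(4, size=16))  # qid must be grouped/sorted
    y = rng.integers(0, 4, size=16)  # relevance labels
    ranker = xgb.XGBRanker(n_estimators=2, tree_method="hist")
    ranker.fit(X, y)
    print(ranker.score(X, y))  # score also accepts the qid-packed DataFrame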
|
# pylint: disable=too-many-locals
"""Tests for learning to rank."""
from types import ModuleType
from typing import Any
import numpy as np
import pytest
import xgboost as xgb
from xgboost import testing as tm
def run_ranking_qid_df(impl: ModuleType, tree_method: str) -> None:
"""Test ranking with qid packed into X."""
import scipy.sparse
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedGroupKFold, cross_val_score
X, y, q, _ = tm.make_ltr(n_samples=128, n_features=2, n_query_groups=8, max_rel=3)
    # pack qid into X using a dataframe
df = impl.DataFrame(X)
df["qid"] = q
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(df, y)
s = ranker.score(df, y)
assert s > 0.7
# works with validation datasets as well
valid_df = df.copy()
valid_df.iloc[0, 0] = 3.0
ranker.fit(df, y, eval_set=[(valid_df, y)])
# same as passing qid directly
ranker = xgb.XGBRanker(n_estimators=3, eval_metric="ndcg", tree_method=tree_method)
ranker.fit(X, y, qid=q)
s1 = ranker.score(df, y)
assert np.isclose(s, s1)
# Works with standard sklearn cv
if tree_method != "gpu_hist":
# we need cuML for this.
kfold = StratifiedGroupKFold(shuffle=False)
results = cross_val_score(ranker, df, y, cv=kfold, groups=df.qid)
assert len(results) == 5
# Works with custom metric
def neg_mse(*args: Any, **kwargs: Any) -> float:
return -float(mean_squared_error(*args, **kwargs))
ranker = xgb.XGBRanker(
n_estimators=3,
eval_metric=neg_mse,
tree_method=tree_method,
disable_default_eval_metric=True,
)
ranker.fit(df, y, eval_set=[(valid_df, y)])
score = ranker.score(valid_df, y)
assert np.isclose(score, ranker.evals_result()["validation_0"]["neg_mse"][-1])
# Works with sparse data
if tree_method != "gpu_hist":
# no sparse with cuDF
X_csr = scipy.sparse.csr_matrix(X)
df = impl.DataFrame.sparse.from_spmatrix(
X_csr, columns=[str(i) for i in range(X.shape[1])]
)
df["qid"] = q
ranker = xgb.XGBRanker(
n_estimators=3, eval_metric="ndcg", tree_method=tree_method
)
ranker.fit(df, y)
s2 = ranker.score(df, y)
assert np.isclose(s2, s)
with pytest.raises(ValueError, match="Either `group` or `qid`."):
ranker.fit(df, y, eval_set=[(X, y)])
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
def test_yolo_head_loss(self):
"""Tests YOLO head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
head = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[1, 1, 1],
train_cfg=Config(
dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0))))
head.init_weights()
        # YOLO head expects multiple levels of features per image
feats = [
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in head.prior_generator.strides
]
predmaps, = head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = head.loss_by_feat(predmaps, [gt_instances],
img_metas)
# When there is no truth, the conf loss should be nonzero but
# cls loss and xy&wh loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
self.assertEqual(
empty_cls_loss, 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_xy_loss, 0,
'there should be no xy loss when there are no true boxes')
self.assertEqual(
empty_wh_loss, 0,
'there should be no wh loss when there are no true boxes')
# When truth is non-empty then all conf, cls loss and xywh loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = head.loss_by_feat(predmaps, [gt_instances], img_metas)
one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.data import InstanceData
from mmdet.models.dense_heads import YOLOV3Head
class TestYOLOV3Head(TestCase):
def test_yolo_head_loss(self):
"""Tests YOLO head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
}]
head = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[1, 1, 1],
train_cfg=Config(
dict(
assigner=dict(
type='GridAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0))))
head.init_weights()
        # YOLO head expects multiple levels of features per image
feats = [
torch.rand(1, 1, s // stride[1], s // stride[0])
for stride in head.prior_generator.strides
]
predmaps, = head.forward(feats)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
empty_gt_losses = head.loss(predmaps, [gt_instances], img_metas)
# When there is no truth, the conf loss should be nonzero but
# cls loss and xy&wh loss should be zero
empty_cls_loss = sum(empty_gt_losses['loss_cls']).item()
empty_conf_loss = sum(empty_gt_losses['loss_conf']).item()
empty_xy_loss = sum(empty_gt_losses['loss_xy']).item()
empty_wh_loss = sum(empty_gt_losses['loss_wh']).item()
self.assertGreater(empty_conf_loss, 0, 'conf loss should be non-zero')
self.assertEqual(
empty_cls_loss, 0,
'there should be no cls loss when there are no true boxes')
self.assertEqual(
empty_xy_loss, 0,
'there should be no xy loss when there are no true boxes')
self.assertEqual(
empty_wh_loss, 0,
'there should be no wh loss when there are no true boxes')
# When truth is non-empty then all conf, cls loss and xywh loss
# should be nonzero for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
one_gt_losses = head.loss(predmaps, [gt_instances], img_metas)
one_gt_cls_loss = sum(one_gt_losses['loss_cls']).item()
one_gt_conf_loss = sum(one_gt_losses['loss_conf']).item()
one_gt_xy_loss = sum(one_gt_losses['loss_xy']).item()
one_gt_wh_loss = sum(one_gt_losses['loss_wh']).item()
self.assertGreater(one_gt_conf_loss, 0, 'conf loss should be non-zero')
self.assertGreater(one_gt_cls_loss, 0, 'cls loss should be non-zero')
self.assertGreater(one_gt_xy_loss, 0, 'xy loss should be non-zero')
self.assertGreater(one_gt_wh_loss, 0, 'wh loss should be non-zero')
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
filename = tmp_path / "csv_with_int_list.csv"
data = textwrap.dedent(
"""\
int_list
1 2 3
4 5 6
7 8 9
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
generator = csv._generate_tables([[csv_file_with_int_list]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa.types.is_list(pa_table.schema.field("int_list").type)
generated_content = pa_table.to_pydict()["int_list"]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
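# For context, a minimal sketch of the same `converters` idea using pandas
# directly (assumption: the Csv builder forwards `converters` to
# `pandas.read_csv`, which applies each callable to the raw string cells):
def _converters_sketch():
    import io

    import pandas as pd

    demo_df = pd.read_csv(
        io.StringIO("int_list\n1 2 3\n4 5 6\n"),
        converters={"int_list": lambda x: [int(i) for i in x.split()]},
    )
    assert demo_df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]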
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
filename = tmp_path / "file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
filename = tmp_path / "malformed_file.csv"
data = textwrap.dedent(
"""\
header1,header2
1,2
10,20,
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
filename = tmp_path / "csv_with_image.csv"
data = textwrap.dedent(
f"""\
image
{image_file}
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
filename = tmp_path / "csv_with_label.csv"
data = textwrap.dedent(
"""\
label
good
bad
good
"""
)
with open(filename, "w") as f:
f.write(data)
return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
csv = Csv()
generator = csv._generate_tables([[csv_file, malformed_csv_file]])
with pytest.raises(ValueError, match="Error tokenizing data"):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(malformed_csv_file) in record.message
for record in caplog.records
)
@require_pil
def test_csv_cast_image(csv_file_with_image):
with open(csv_file_with_image, encoding="utf-8") as f:
image_file = f.read().splitlines()[1]
csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
generator = csv._generate_tables([[csv_file_with_image]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("image").type == Image()()
generated_content = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
@require_pil
def test_csv_cast_label(csv_file_with_label):
with open(csv_file_with_label, encoding="utf-8") as f:
labels = f.read().splitlines()[1:]
csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
generator = csv._generate_tables([[csv_file_with_label]])
pa_table = pa.concat_tables([table for _, table in generator])
assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
generated_content = pa_table.to_pydict()["label"]
assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
    def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
"""
This class implements CoSENT (Cosine Sentence).
It expects that each of the InputExamples consists of a pair of texts and a float valued label, representing
the expected similarity score between the pair.
It computes the following loss function:
        ``loss = logsum(1+exp(s(k,l)-s(i,j))+exp...)``, where ``(i,j)`` and ``(k,l)`` are any of the input pairs in the
batch such that the expected similarity of ``(i,j)`` is greater than ``(k,l)``. The summation is over all possible
pairs of input pairs in the batch that match this condition.
Anecdotal experiments show that this loss function produces a more powerful training signal than :class:`SparseCosineSimilarityLoss`,
resulting in faster convergence and a final model with superior performance. Consequently, SparseCoSENTLoss may be used
as a drop-in replacement for :class:`SparseCosineSimilarityLoss` in any training script.
Args:
model: SparseEncoder
similarity_fct: Function to compute the PAIRWISE similarity
between embeddings. Default is
``util.pairwise_cos_sim``.
scale: Output of similarity function is multiplied by scale
value. Represents the inverse temperature.
References:
- For further details, see: https://kexue.fm/archives/8847
Requirements:
- Sentence pairs with corresponding similarity scores in range of the similarity function. Default is [-1,1].
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseAnglELoss` is SparseCoSENTLoss with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``.
- :class:`SparseCosineSimilarityLoss` seems to produce a weaker training signal than SparseCoSENTLoss. In our experiments, SparseCoSENTLoss is recommended.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCoSENTLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
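# A minimal numeric sketch of the CoSENT objective described in the docstring
# above (assumptions: plain PyTorch, with per-pair cosine similarities already
# computed; `scale` is the inverse temperature):
import torch

def _cosent_loss_sketch(scores: torch.Tensor, labels: torch.Tensor, scale: float = 20.0) -> torch.Tensor:
    # Score differences s(k,l) - s(i,j), scaled by the inverse temperature.
    diffs = scale * (scores[:, None] - scores[None, :])
    # Keep only pairs of pairs where the expected similarity of (i,j)
    # (the column entry) is greater than that of (k,l) (the row entry).
    mask = labels[:, None] < labels[None, :]
    # log(1 + sum(exp(...))) via logsumexp with an implicit exp(0) = 1 term.
    return torch.logsumexp(torch.cat([torch.zeros(1), diffs[mask]]), dim=0)

print(_cosent_loss_sketch(torch.tensor([0.9, 0.2]), torch.tensor([1.0, 0.3])))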
|
from __future__ import annotations
from sentence_transformers import util
from sentence_transformers.losses.CoSENTLoss import CoSENTLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCoSENTLoss(CoSENTLoss):
    def __init__(self, model: SparseEncoder, scale: float = 20.0, similarity_fct=util.pairwise_cos_sim) -> None:
return super().__init__(model, scale=scale, similarity_fct=similarity_fct)
|
from .hifigan_pipeline import HIFIGAN_VOCODER_V3_LJSPEECH, HiFiGANVocoderBundle
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
"HIFIGAN_VOCODER_V3_LJSPEECH",
"HiFiGANVocoderBundle",
]
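# A minimal usage sketch for one of the bundles imported above (assumption:
# the standard torchaudio RNNTBundle API, which exposes get_decoder,
# get_feature_extractor and get_token_processor; model weights are fetched
# lazily on first use):
#
# >>> decoder = EMFORMER_RNNT_BASE_TEDLIUM3.get_decoder()
# >>> token_processor = EMFORMER_RNNT_BASE_TEDLIUM3.get_token_processor()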
|
from .rnnt_pipeline import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
__all__ = [
"EMFORMER_RNNT_BASE_MUSTC",
"EMFORMER_RNNT_BASE_TEDLIUM3",
]
|
"""
This example loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
import logging
import os
import sys
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
script_folder_path = os.path.dirname(os.path.realpath(__file__))
# Limit torch to 4 threads
torch.set_num_threads(4)
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
model_name = sys.argv[1] if len(sys.argv) > 1 else "stsb-distilroberta-base-v2"
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
model.evaluate(dev_evaluator)
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
model.evaluate(test_evaluator)
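# A hedged sketch of capturing the scores instead of discarding them
# (assumption: recent sentence-transformers versions return a dict mapping
# metric names such as "sts-test_spearman_cosine" to float scores):
results = model.evaluate(test_evaluator)
print(results)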
|
"""
This example loads a pre-trained model and evaluates it on the STSbenchmark dataset
Usage:
python evaluation_stsbenchmark.py
OR
python evaluation_stsbenchmark.py model_name
"""
from sentence_transformers import SentenceTransformer, util, LoggingHandler, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
import sys
import torch
import gzip
import os
import csv
script_folder_path = os.path.dirname(os.path.realpath(__file__))
#Limit torch to 4 threads
torch.set_num_threads(4)
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
#### /print debug information to stdout
model_name = sys.argv[1] if len(sys.argv) > 1 else 'stsb-distilroberta-base-v2'
# Load a named sentence model (based on BERT). This will download the model from our server.
# Alternatively, you can also pass a filepath to SentenceTransformer()
model = SentenceTransformer(model_name)
sts_dataset_path = 'data/stsbenchmark.tsv.gz'
if not os.path.exists(sts_dataset_path):
util.http_get('https://sbert.net/datasets/stsbenchmark.tsv.gz', sts_dataset_path)
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row['score']) / 5.0 # Normalize score to range 0 ... 1
inp_example = InputExample(texts=[row['sentence1'], row['sentence2']], label=score)
if row['split'] == 'dev':
dev_samples.append(inp_example)
elif row['split'] == 'test':
test_samples.append(inp_example)
else:
train_samples.append(inp_example)
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')
model.evaluate(evaluator)
evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test')
model.evaluate(evaluator)
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN and QueryInst in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. This is required to train QueryInst.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
        assert self.with_rpn, 'Sparse R-CNN does not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. But we don't support it in this architecture.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN does not support' \
' external proposals'
        assert gt_masks is None, 'Sparse R-CNN does not support instance segmentation'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
bbox_results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return bbox_results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
|
"""Migrate LangChain to the most recent version."""
from pathlib import Path
import rich
import typer
from gritql import run # type: ignore
from typer import Option
def get_gritdir_path() -> Path:
"""Get the path to the grit directory."""
script_dir = Path(__file__).parent
return script_dir / ".grit"
def migrate(
ctx: typer.Context,
# Using diff instead of dry-run for backwards compatibility with the old CLI
diff: bool = Option(
False,
"--diff",
help="Show the changes that would be made without applying them.",
),
interactive: bool = Option(
False,
"--interactive",
help="Prompt for confirmation before making each change",
),
) -> None:
"""Migrate langchain to the most recent version.
Any undocumented arguments will be passed to the Grit CLI.
"""
rich.print(
"✈️ This script will help you migrate to a LangChain 0.3. "
"This migration script will attempt to replace old imports in the code "
"with new ones. "
"If you need to migrate to LangChain 0.2, please downgrade to version 0.0.29 "
"of the langchain-cli.\n\n"
"🔄 You will need to run the migration script TWICE to migrate (e.g., "
"to update llms import from langchain, the script will first move them to "
"corresponding imports from the community package, and on the second "
"run will migrate from the community package to the partner package "
"when possible). \n\n"
"🔍 You can pre-view the changes by running with the --diff flag. \n\n"
"🚫 You can disable specific import changes by using the --disable "
"flag. \n\n"
"📄 Update your pyproject.toml or requirements.txt file to "
"reflect any imports from new packages. For example, if you see new "
"imports from langchain_openai, langchain_anthropic or "
"langchain_text_splitters you "
"should add them to your dependencies! \n\n"
'⚠️ This script is a "best-effort", and is likely to make some '
"mistakes.\n\n"
"🛡️ Backup your code prior to running the migration script -- it will "
"modify your files!\n\n"
)
rich.print("-" * 10)
rich.print()
args = list(ctx.args)
if interactive:
args.append("--interactive")
if diff:
args.append("--dry-run")
final_code = run.apply_pattern(
"langchain_all_migrations()",
args,
grit_dir=get_gritdir_path(),
)
raise typer.Exit(code=final_code)
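# A minimal sketch of wiring the command above into a Typer app so that extra,
# undocumented flags flow through to the Grit CLI (assumption: standard Typer
# context settings; `migrate` is the function defined above):
app = typer.Typer()
app.command(
    name="migrate",
    context_settings={"allow_extra_args": True, "ignore_unknown_options": True},
)(migrate)

if __name__ == "__main__":
    app()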
|
"""Migrate LangChain to the most recent version."""
from pathlib import Path
import rich
import typer
from gritql import run # type: ignore
from typer import Option
def get_gritdir_path() -> Path:
"""Get the path to the grit directory."""
script_dir = Path(__file__).parent
return script_dir / ".grit"
def migrate(
ctx: typer.Context,
# Using diff instead of dry-run for backwards compatibility with the old CLI
diff: bool = Option(
False,
"--diff",
help="Show the changes that would be made without applying them.",
),
interactive: bool = Option(
False,
"--interactive",
help="Prompt for confirmation before making each change",
),
) -> None:
"""Migrate langchain to the most recent version.
Any undocumented arguments will be passed to the Grit CLI.
"""
rich.print(
"✈️ This script will help you migrate to a LangChain 0.3. "
"This migration script will attempt to replace old imports in the code "
"with new ones. "
"If you need to migrate to LangChain 0.2, please downgrade to version 0.0.29 "
"of the langchain-cli.\n\n"
"🔄 You will need to run the migration script TWICE to migrate (e.g., "
"to update llms import from langchain, the script will first move them to "
"corresponding imports from the community package, and on the second "
"run will migrate from the community package to the partner package "
"when possible). \n\n"
"🔍 You can pre-view the changes by running with the --diff flag. \n\n"
"🚫 You can disable specific import changes by using the --disable "
"flag. \n\n"
"📄 Update your pyproject.toml or requirements.txt file to "
"reflect any imports from new packages. For example, if you see new "
"imports from langchain_openai, langchain_anthropic or "
"langchain_text_splitters you "
"should them to your dependencies! \n\n"
'⚠️ This script is a "best-effort", and is likely to make some '
"mistakes.\n\n"
"🛡️ Backup your code prior to running the migration script -- it will "
"modify your files!\n\n"
)
rich.print("-" * 10)
rich.print()
args = list(ctx.args)
if interactive:
args.append("--interactive")
if diff:
args.append("--dry-run")
final_code = run.apply_pattern(
"langchain_all_migrations()",
args,
grit_dir=get_gritdir_path(),
)
raise typer.Exit(code=final_code)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,
generate_coordinate, mask2ndarray, multi_apply,
select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl',
'filter_scores_and_topk'
]
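# A minimal sketch of `multi_apply`, one of the helpers exported above: it
# maps a function over zipped inputs and transposes the per-call result
# tuples into a tuple of lists (assumption: the standard mmdet semantics).
def _multi_apply_sketch():
    def square_and_cube(x):
        return x ** 2, x ** 3

    squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
    assert squares == [1, 4, 9]
    assert cubes == [1, 8, 27]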
|
# Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,
reduce_mean)
from .misc import (center_of_mass, flip_tensor, generate_coordinate,
mask2ndarray, multi_apply, select_single_mlvl, unmap)
__all__ = [
'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',
'center_of_mass', 'generate_coordinate', 'select_single_mlvl'
]
|
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch torchvision",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
"openvino": ("openvino", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
    whl_matches = re.findall(
        r"[^\s]*\.whl",
        build_process.stdout,
    )
    if not whl_matches:
        # Indexing with [-1] on an empty match list would raise IndexError
        # before this check could run, so validate the matches first.
        print(build_process.stderr)
        raise ValueError("Installing Keras package unsuccessful.")
    return whl_matches[-1]
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0]
+ " "
+ BACKEND_REQ[other_backends[2]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
import os
import re
import subprocess
from keras.src import backend
# For torch, use index url to avoid installing nvidia drivers for the test.
BACKEND_REQ = {
"tensorflow": ("tensorflow-cpu", ""),
"torch": (
"torch torchvision",
"--extra-index-url https://download.pytorch.org/whl/cpu ",
),
"jax": ("jax[cpu]", ""),
}
def setup_package():
subprocess.run("rm -rf tmp_build_dir", shell=True)
build_process = subprocess.run(
"python3 pip_build.py",
capture_output=True,
text=True,
shell=True,
)
print(build_process.stdout)
    whl_matches = re.findall(
        r"[^\s]*\.whl",
        build_process.stdout,
    )
    if not whl_matches:
        # Indexing with [-1] on an empty match list would raise IndexError
        # before this check could run, so validate the matches first.
        print(build_process.stderr)
        raise ValueError("Installing Keras package unsuccessful.")
    return whl_matches[-1]
def create_virtualenv():
env_setup = [
# Create virtual environment
"python3 -m venv test_env",
]
os.environ["PATH"] = (
"/test_env/bin/" + os.pathsep + os.environ.get("PATH", "")
)
run_commands_local(env_setup)
def manage_venv_installs(whl_path):
other_backends = list(set(BACKEND_REQ.keys()) - {backend.backend()})
backend_pkg, backend_extra_url = BACKEND_REQ[backend.backend()]
install_setup = [
# Installs the backend's package and common requirements
"pip install " + backend_extra_url + backend_pkg,
"pip install -r requirements-common.txt",
"pip install pytest",
# Ensure other backends are uninstalled
"pip uninstall -y "
+ BACKEND_REQ[other_backends[0]][0]
+ " "
+ BACKEND_REQ[other_backends[1]][0],
# Install `.whl` package
"pip install " + whl_path,
]
run_commands_venv(install_setup)
def run_keras_flow():
test_script = [
# Runs the example script
"python -m pytest integration_tests/basic_full_flow.py",
]
run_commands_venv(test_script)
def cleanup():
cleanup_script = [
# Exits virtual environment, deletes files, and any
# miscellaneous install logs
"exit",
"rm -rf test_env",
"rm -rf tmp_build_dir",
"rm -f *+cpu",
]
run_commands_local(cleanup_script)
def run_commands_local(commands):
for command in commands:
print(f"Running command: {command}")
subprocess.run(command, shell=True)
def run_commands_venv(commands):
for command in commands:
print(f"Running command: {command}")
cmd_with_args = command.split(" ")
cmd_with_args[0] = "test_env/bin/" + cmd_with_args[0]
p = subprocess.Popen(cmd_with_args)
assert p.wait() == 0
def test_keras_imports():
try:
# Ensures packages from all backends are installed.
# Builds Keras core package and returns package file path.
whl_path = setup_package()
# Creates and activates a virtual environment.
create_virtualenv()
# Ensures the backend's package is installed
# and the other backends are uninstalled.
manage_venv_installs(whl_path)
# Runs test of basic flow in Keras Core.
# Tests for backend-specific imports and `model.fit()`.
run_keras_flow()
# Removes virtual environment and associated files
finally:
cleanup()
if __name__ == "__main__":
test_keras_imports()
|
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`.
"""
# %%
import torch
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.datapoints.wrap` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend always defining your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only ``**kwargs`` should be enough.)
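# %%
# A sketch of that recommendation (the name is hypothetical; the body mirrors
# the kernel defined earlier, and ``**kwargs`` absorbs any parameter that a
# future torchvision release may add):
def hflip_my_datapoint_future_proof(my_dp, **kwargs):  # noqa
    print("Flipping!")
    out = my_dp.flip(-1)
    return datapoints.wrap(out, like=my_dp)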
|
"""
=====================================
How to write your own Datapoint class
=====================================
This guide is intended for advanced users and downstream library maintainers. We explain how to
write your own datapoint class, and how to make it compatible with the built-in
Torchvision v2 transforms. Before continuing, make sure you have read
:ref:`sphx_glr_auto_examples_v2_transforms_plot_datapoints.py`.
"""
# %%
import torch
import torchvision
# We are using BETA APIs, so we deactivate the associated warning, thereby acknowledging that
# some APIs may slightly change in the future
torchvision.disable_beta_transforms_warning()
from torchvision import datapoints
from torchvision.transforms import v2
# %%
# We will create a very simple class that just inherits from the base
# :class:`~torchvision.datapoints.Datapoint` class. It will be enough to cover
# what you need to know to implement your more elaborate use cases. If you need
# to create a class that carries meta-data, take a look at how the
# :class:`~torchvision.datapoints.BoundingBoxes` class is `implemented
# <https://github.com/pytorch/vision/blob/main/torchvision/datapoints/_bounding_box.py>`_.
class MyDatapoint(datapoints.Datapoint):
pass
my_dp = MyDatapoint([1, 2, 3])
my_dp
# %%
# Now that we have defined our custom Datapoint class, we want it to be
# compatible with the built-in torchvision transforms, and the functional API.
# For that, we need to implement a kernel which performs the core of the
# transformation, and then "hook" it to the functional that we want to support
# via :func:`~torchvision.transforms.v2.functional.register_kernel`.
#
# We illustrate this process below: we create a kernel for the "horizontal flip"
# operation of our MyDatapoint class, and register it to the functional API.
from torchvision.transforms.v2 import functional as F
@F.register_kernel(functional="hflip", datapoint_cls=MyDatapoint)
def hflip_my_datapoint(my_dp, *args, **kwargs):
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# To understand why :func:`~torchvision.datapoints.wrap` is used, see
# :ref:`datapoint_unwrapping_behaviour`. Ignore the ``*args, **kwargs`` for now,
# we will explain it below in :ref:`param_forwarding`.
#
# .. note::
#
# In our call to ``register_kernel`` above we used a string
# ``functional="hflip"`` to refer to the functional we want to hook into. We
# could also have used the functional *itself*, i.e.
# ``@register_kernel(functional=F.hflip, ...)``.
#
# Now that we have registered our kernel, we can call the functional API on a
# ``MyDatapoint`` instance:
my_dp = MyDatapoint(torch.rand(3, 256, 256))
_ = F.hflip(my_dp)
# %%
# And we can also use the
# :class:`~torchvision.transforms.v2.RandomHorizontalFlip` transform, since it relies on :func:`~torchvision.transforms.v2.functional.hflip` internally:
t = v2.RandomHorizontalFlip(p=1)
_ = t(my_dp)
# %%
# .. note::
#
# We cannot register a kernel for a transform class, we can only register a
# kernel for a **functional**. The reason we can't register a transform
# class is because one transform may internally rely on more than one
# functional, so in general we can't register a single kernel for a given
# class.
#
# .. _param_forwarding:
#
# Parameter forwarding, and ensuring future compatibility of your kernels
# -----------------------------------------------------------------------
#
# The functional API that you're hooking into is public and therefore
# **backward** compatible: we guarantee that the parameters of these functionals
# won't be removed or renamed without a proper deprecation cycle. However, we
# don't guarantee **forward** compatibility, and we may add new parameters in
# the future.
#
# Imagine that in a future version, Torchvision adds a new ``inplace`` parameter
# to its :func:`~torchvision.transforms.v2.functional.hflip` functional. If you
# already defined and registered your own kernel as
def hflip_my_datapoint(my_dp): # noqa
print("Flipping!")
out = my_dp.flip(-1)
return datapoints.wrap(out, like=my_dp)
# %%
# then calling ``F.hflip(my_dp)`` will **fail**, because ``hflip`` will try to
# pass the new ``inplace`` parameter to your kernel, but your kernel doesn't
# accept it.
#
# For this reason, we recommend always defining your kernels with
# ``*args, **kwargs`` in their signature, as done above. This way, your kernel
# will be able to accept any new parameter that we may add in the future.
# (Technically, adding only ``**kwargs`` should be enough.)
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a
transformed version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
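# A minimal usage sketch for the dataset class above (assumptions: scipy is
# installed for the .mat files and the download completes):
if __name__ == "__main__":
    dataset = Flowers102(root="data", split="val", download=True)
    image, label = dataset[0]
    print(len(dataset), image.size, label)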
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
from .vision import VisionDataset
class Flowers102(VisionDataset):
"""`Oxford 102 Flower <https://www.robots.ox.ac.uk/~vgg/data/flowers/102/>`_ Dataset.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Oxford 102 Flower is an image classification dataset consisting of 102 flower categories. The
flowers were chosen to be flowers commonly occurring in the United Kingdom. Each class consists of
between 40 and 258 images.
The images have large scale, pose and light variations. In addition, there are categories that
have large variations within the category, and several very similar categories.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a
transformed version. E.g, ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
_download_url_prefix = "https://www.robots.ox.ac.uk/~vgg/data/flowers/102/"
_file_dict = { # filename, md5
"image": ("102flowers.tgz", "52808999861908f626f3c1f4e79d11fa"),
"label": ("imagelabels.mat", "e0620be6f572b9609742df49c70aed4d"),
"setid": ("setid.mat", "a5357ecc9cb78c4bef273ce3793fc85c"),
}
_splits_map = {"train": "trnid", "val": "valid", "test": "tstid"}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._base_folder = Path(self.root) / "flowers-102"
self._images_folder = self._base_folder / "jpg"
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
from scipy.io import loadmat
set_ids = loadmat(self._base_folder / self._file_dict["setid"][0], squeeze_me=True)
image_ids = set_ids[self._splits_map[self._split]].tolist()
labels = loadmat(self._base_folder / self._file_dict["label"][0], squeeze_me=True)
image_id_to_label = dict(enumerate((labels["labels"] - 1).tolist(), 1))
self._labels = []
self._image_files = []
for image_id in image_ids:
self._labels.append(image_id_to_label[image_id])
self._image_files.append(self._images_folder / f"image_{image_id:05d}.jpg")
def __len__(self) -> int:
return len(self._image_files)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._image_files[idx], self._labels[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_integrity(self):
if not (self._images_folder.exists() and self._images_folder.is_dir()):
return False
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
if not check_integrity(str(self._base_folder / filename), md5):
return False
return True
def download(self):
if self._check_integrity():
return
download_and_extract_archive(
f"{self._download_url_prefix}{self._file_dict['image'][0]}",
str(self._base_folder),
md5=self._file_dict["image"][1],
)
for id in ["label", "setid"]:
filename, md5 = self._file_dict[id]
download_url(self._download_url_prefix + filename, str(self._base_folder), md5=md5)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.logging import get_logger
logger = get_logger(__name__) # pylint: disable=invalid-name
class CacheMixin:
r"""
    A class for enabling/disabling caching techniques on diffusion models.
Supported caching techniques:
- [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588)
- [FasterCache](https://huggingface.co/papers/2410.19355)
"""
_cache_config = None
@property
def is_cache_enabled(self) -> bool:
return self._cache_config is not None
def enable_cache(self, config) -> None:
r"""
Enable caching techniques on the model.
Args:
config (`Union[PyramidAttentionBroadcastConfig]`):
The configuration for applying the caching technique. Currently supported caching techniques are:
- [`~hooks.PyramidAttentionBroadcastConfig`]
Example:
```python
>>> import torch
>>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig
>>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")
>>> config = PyramidAttentionBroadcastConfig(
... spatial_attention_block_skip_range=2,
... spatial_attention_timestep_skip_range=(100, 800),
... current_timestep_callback=lambda: pipe.current_timestep,
... )
>>> pipe.transformer.enable_cache(config)
```
"""
from ..hooks import (
FasterCacheConfig,
PyramidAttentionBroadcastConfig,
apply_faster_cache,
apply_pyramid_attention_broadcast,
)
if self.is_cache_enabled:
raise ValueError(
f"Caching has already been enabled with {type(self._cache_config)}. To apply a new caching technique, please disable the existing one first."
)
if isinstance(config, PyramidAttentionBroadcastConfig):
apply_pyramid_attention_broadcast(self, config)
elif isinstance(config, FasterCacheConfig):
apply_faster_cache(self, config)
else:
raise ValueError(f"Cache config {type(config)} is not supported.")
self._cache_config = config
def disable_cache(self) -> None:
from ..hooks import FasterCacheConfig, HookRegistry, PyramidAttentionBroadcastConfig
from ..hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
from ..hooks.pyramid_attention_broadcast import _PYRAMID_ATTENTION_BROADCAST_HOOK
if self._cache_config is None:
logger.warning("Caching techniques have not been enabled, so there's nothing to disable.")
return
if isinstance(self._cache_config, PyramidAttentionBroadcastConfig):
registry = HookRegistry.check_if_exists_or_initialize(self)
registry.remove_hook(_PYRAMID_ATTENTION_BROADCAST_HOOK, recurse=True)
elif isinstance(self._cache_config, FasterCacheConfig):
registry = HookRegistry.check_if_exists_or_initialize(self)
registry.remove_hook(_FASTER_CACHE_DENOISER_HOOK, recurse=True)
registry.remove_hook(_FASTER_CACHE_BLOCK_HOOK, recurse=True)
else:
raise ValueError(f"Cache config {type(self._cache_config)} is not supported.")
self._cache_config = None
def _reset_stateful_cache(self, recurse: bool = True) -> None:
from ..hooks import HookRegistry
HookRegistry.check_if_exists_or_initialize(self).reset_stateful_hooks(recurse=recurse)
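# A minimal sketch of the disable path described above, continuing the
# docstring example (assumption: `pipe` is the CogVideoXPipeline from the
# `enable_cache` docstring, with caching currently enabled):
#
# >>> pipe.transformer.disable_cache()
# >>> assert not pipe.transformer.is_cache_enabled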
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.logging import get_logger
logger = get_logger(__name__) # pylint: disable=invalid-name
class CacheMixin:
r"""
    A class for enabling/disabling caching techniques on diffusion models.
Supported caching techniques:
- [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588)
- [FasterCache](https://huggingface.co/papers/2410.19355)
"""
_cache_config = None
@property
def is_cache_enabled(self) -> bool:
return self._cache_config is not None
def enable_cache(self, config) -> None:
r"""
Enable caching techniques on the model.
Args:
config (`Union[PyramidAttentionBroadcastConfig]`):
The configuration for applying the caching technique. Currently supported caching techniques are:
- [`~hooks.PyramidAttentionBroadcastConfig`]
Example:
```python
>>> import torch
>>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig
>>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")
>>> config = PyramidAttentionBroadcastConfig(
... spatial_attention_block_skip_range=2,
... spatial_attention_timestep_skip_range=(100, 800),
... current_timestep_callback=lambda: pipe.current_timestep,
... )
>>> pipe.transformer.enable_cache(config)
```
"""
from ..hooks import (
FasterCacheConfig,
PyramidAttentionBroadcastConfig,
apply_faster_cache,
apply_pyramid_attention_broadcast,
)
if self.is_cache_enabled:
raise ValueError(
f"Caching has already been enabled with {type(self._cache_config)}. To apply a new caching technique, please disable the existing one first."
)
if isinstance(config, PyramidAttentionBroadcastConfig):
apply_pyramid_attention_broadcast(self, config)
elif isinstance(config, FasterCacheConfig):
apply_faster_cache(self, config)
else:
raise ValueError(f"Cache config {type(config)} is not supported.")
self._cache_config = config
def disable_cache(self) -> None:
from ..hooks import FasterCacheConfig, HookRegistry, PyramidAttentionBroadcastConfig
from ..hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK
from ..hooks.pyramid_attention_broadcast import _PYRAMID_ATTENTION_BROADCAST_HOOK
if self._cache_config is None:
logger.warning("Caching techniques have not been enabled, so there's nothing to disable.")
return
if isinstance(self._cache_config, PyramidAttentionBroadcastConfig):
registry = HookRegistry.check_if_exists_or_initialize(self)
registry.remove_hook(_PYRAMID_ATTENTION_BROADCAST_HOOK, recurse=True)
elif isinstance(self._cache_config, FasterCacheConfig):
registry = HookRegistry.check_if_exists_or_initialize(self)
registry.remove_hook(_FASTER_CACHE_DENOISER_HOOK, recurse=True)
registry.remove_hook(_FASTER_CACHE_BLOCK_HOOK, recurse=True)
else:
raise ValueError(f"Cache config {type(self._cache_config)} is not supported.")
self._cache_config = None
def _reset_stateful_cache(self, recurse: bool = True) -> None:
from ..hooks import HookRegistry
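        # Clear any state the caching hooks accumulated during a previous inference run.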
HookRegistry.check_if_exists_or_initialize(self).reset_stateful_hooks(recurse=recurse)
|
import os
import time
import uuid
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
cur_dir = os.path.dirname(os.path.abspath(__file__))
qdrant_yml = os.path.abspath(os.path.join(cur_dir, 'docker-compose.yml'))
@pytest.fixture(scope='session', autouse=True)
def start_storage():
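    # Spin up the Qdrant service from docker-compose.yml for the whole test session.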
os.system(f"docker-compose -f {qdrant_yml} up -d --remove-orphans")
time.sleep(1)
yield
os.system(f"docker-compose -f {qdrant_yml} down --remove-orphans")
@pytest.fixture(scope='function')
def tmp_collection_name():
return uuid.uuid4().hex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
client.delete_collection(collection_name='documents')
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import pytest
import qdrant_client
from docarray.index import QdrantDocumentIndex
@pytest.fixture
def qdrant() -> qdrant_client.QdrantClient:
"""This fixture takes care of removing the collection before each test case"""
client = qdrant_client.QdrantClient(path='/tmp/qdrant-local')
client.delete_collection(collection_name='documents')
return client
@pytest.fixture
def qdrant_config(qdrant) -> QdrantDocumentIndex.DBConfig:
return QdrantDocumentIndex.DBConfig(path=qdrant._client.location)
|
import pytest
from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator
@pytest.fixture
def json_schema_evaluator() -> JsonSchemaEvaluator:
return JsonSchemaEvaluator()
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_requires_input(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.requires_input is False
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_requires_reference(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.requires_reference is True
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_evaluation_name(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.evaluation_name == "json_schema_validation"
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_valid_prediction(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John", "age": 30}'
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
assert result["score"] is True
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_invalid_prediction(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John", "age": "30"}' # age is a string instead of integer
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
assert result["score"] is False
assert "reasoning" in result
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_missing_property(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John"}' # age property is missing
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
"required": ["name", "age"],
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction,
reference=reference,
)
assert result["score"] is False
assert "reasoning" in result
|
import pytest
from langchain.evaluation.parsing.json_schema import JsonSchemaEvaluator
@pytest.fixture
def json_schema_evaluator() -> JsonSchemaEvaluator:
return JsonSchemaEvaluator()
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_requires_input(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.requires_input is False
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_requires_reference(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.requires_reference is True
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_evaluation_name(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
assert json_schema_evaluator.evaluation_name == "json_schema_validation"
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_valid_prediction(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John", "age": 30}'
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
assert result["score"] is True
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_invalid_prediction(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John", "age": "30"}' # age is a string instead of integer
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
assert result["score"] is False
assert "reasoning" in result
@pytest.mark.requires("jsonschema")
def test_json_schema_evaluator_missing_property(
json_schema_evaluator: JsonSchemaEvaluator,
) -> None:
prediction = '{"name": "John"}' # age property is missing
reference = {
"type": "object",
"properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
"required": ["name", "age"],
}
result = json_schema_evaluator._evaluate_strings(
prediction=prediction, reference=reference
)
assert result["score"] is False
assert "reasoning" in result
|
from __future__ import annotations
from collections.abc import Iterable
from enum import Enum
from typing import Any
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"SiameseDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
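        # Hadsell et al. (2006): loss = 0.5 * (y * d^2 + (1 - y) * relu(margin - d)^2)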
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from __future__ import annotations
from enum import Enum
from typing import Any, Iterable
import torch.nn.functional as F
from torch import Tensor, nn
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""The metric for the contrastive loss"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
) -> None:
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
Args:
model: SentenceTransformer model
distance_metric: Function that returns a distance between
two embeddings. The class SiameseDistanceMetric contains
                pre-defined metrics that can be used
margin: Negative samples (label == 0) should have a distance
of at least the margin value.
size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Example:
::
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from datasets import Dataset
model = SentenceTransformer("microsoft/mpnet-base")
train_dataset = Dataset.from_dict({
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"label": [1, 0],
})
loss = losses.ContrastiveLoss(model)
trainer = SentenceTransformerTrainer(
model=model,
train_dataset=train_dataset,
loss=loss,
)
trainer.train()
"""
super().__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self) -> dict[str, Any]:
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = f"SiameseDistanceMetric.{name}"
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[dict[str, Tensor]], labels: Tensor) -> Tensor:
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
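        # Hadsell et al. (2006): loss = 0.5 * (y * d^2 + (1 - y) * relu(margin - d)^2)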
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
        for _ in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
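        # Share conv weights across FPN levels; the BN layers inside each ConvModule remain level-specific.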
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            tuple: Usually a tuple of classification scores and bbox predictions.
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each a 4D tensor whose number of channels is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each a 4D tensor whose number of channels is
                    num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
            cls_feat = x
            reg_feat = x
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from ..builder import HEADS
from .anchor_head import AnchorHead
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
""""RetinaHead with separate BN.
In RetinaHead, conv/norm layers are shared across different FPN levels,
while in RetinaSepBNHead, conv layers are shared across different FPN
levels, but BN layers are separated.
"""
def __init__(self,
num_classes,
num_ins,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_ins = num_ins
super(RetinaSepBNHead, self).__init__(
num_classes, in_channels, init_cfg=init_cfg, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
        for _ in range(self.num_ins):
cls_convs = nn.ModuleList()
reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.cls_convs.append(cls_convs)
self.reg_convs.append(reg_convs)
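        # Share conv weights across FPN levels; the BN layers inside each ConvModule remain level-specific.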
for i in range(self.stacked_convs):
for j in range(1, self.num_ins):
self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
super(RetinaSepBNHead, self).init_weights()
for m in self.cls_convs[0]:
normal_init(m.conv, std=0.01)
for m in self.reg_convs[0]:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            tuple: Usually a tuple of classification scores and bbox predictions.
                cls_scores (list[Tensor]): Classification scores for all scale
                    levels, each a 4D tensor whose number of channels is
                    num_anchors * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for all scale
                    levels, each a 4D tensor whose number of channels is
                    num_anchors * 4.
"""
cls_scores = []
bbox_preds = []
for i, x in enumerate(feats):
cls_feat = feats[i]
reg_feat = feats[i]
for cls_conv in self.cls_convs[i]:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs[i]:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
cls_scores.append(cls_score)
bbox_preds.append(bbox_pred)
return cls_scores, bbox_preds
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
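# Every module also accepts .zip archives of its data files.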
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .webdataset import webdataset
def _hash_python_lines(lines: List[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
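# Every module also accepts .zip archives of its data files.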
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GLIP',
data_preprocessor=dict(
type='DetDataPreprocessor',
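        # BGR channel statistics; bgr_to_rgb=False keeps the caffe-style channel order.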
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='FPN',
in_channels=[192, 384, 768],
out_channels=256,
start_level=0,
relu_before_extra_convs=True,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSVLFusionHead',
lang_model_name=lang_model_name,
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='DeltaXYWHBBoxCoderForGLIP',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
),
language_model=dict(type='BertModel', name=lang_model_name),
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'text', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_classes=True))
test_dataloader = val_dataloader
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
lang_model_name = 'bert-base-uncased'
model = dict(
type='GLIP',
data_preprocessor=dict(
type='DetDataPreprocessor',
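        # BGR channel statistics; bgr_to_rgb=False keeps the caffe-style channel order.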
mean=[103.53, 116.28, 123.675],
std=[57.375, 57.12, 58.395],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
type='SwinTransformer',
embed_dims=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False,
convert_weights=False),
neck=dict(
type='FPN',
in_channels=[192, 384, 768],
out_channels=256,
start_level=0,
relu_before_extra_convs=True,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(
type='ATSSVLFusionHead',
lang_model_name=lang_model_name,
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128],
center_offset=0.5),
bbox_coder=dict(
type='DeltaXYWHBBoxCoderForGLIP',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
),
language_model=dict(type='BertModel', name=lang_model_name),
train_cfg=dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args,
imdecode_backend='pillow'),
dict(
type='FixScaleResize',
scale=(800, 1333),
keep_ratio=True,
backend='pillow'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'caption', 'custom_entities'))
]
val_dataloader = dict(
dataset=dict(pipeline=test_pipeline, return_caption=True))
test_dataloader = val_dataloader
|
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
from mmengine.logging import print_log
def get_caller_name():
"""Get name of caller method."""
# this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name
# callee_frame = inspect.stack()[1][0] # e.g., log_img_scale
caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale
caller_method = caller_frame.f_code.co_name
try:
caller_class = caller_frame.f_locals['self'].__class__.__name__
return f'{caller_class}.{caller_method}'
except KeyError: # caller is a function
return caller_method
def log_img_scale(img_scale, shape_order='hw', skip_square=False):
"""Log image size.
Args:
img_scale (tuple): Image size to be logged.
shape_order (str, optional): The order of image shape.
'hw' for (height, width) and 'wh' for (width, height).
Defaults to 'hw'.
skip_square (bool, optional): Whether to skip logging for square
img_scale. Defaults to False.
Returns:
bool: Whether to have done logging.
"""
if shape_order == 'hw':
height, width = img_scale
elif shape_order == 'wh':
width, height = img_scale
else:
raise ValueError(f'Invalid shape_order {shape_order}.')
if skip_square and (height == width):
return False
caller = get_caller_name()
print_log(
f'image shape: height={height}, width={width} in {caller}',
logger='current')
return True
|
# Copyright (c) OpenMMLab. All rights reserved.
import inspect
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get root logger.
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
Returns:
:obj:`logging.Logger`: The obtained logger
"""
logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
return logger
def get_caller_name():
"""Get name of caller method."""
# this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name
# callee_frame = inspect.stack()[1][0] # e.g., log_img_scale
caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale
caller_method = caller_frame.f_code.co_name
try:
caller_class = caller_frame.f_locals['self'].__class__.__name__
return f'{caller_class}.{caller_method}'
except KeyError: # caller is a function
return caller_method
def log_img_scale(img_scale, shape_order='hw', skip_square=False):
"""Log image size.
Args:
img_scale (tuple): Image size to be logged.
shape_order (str, optional): The order of image shape.
'hw' for (height, width) and 'wh' for (width, height).
Defaults to 'hw'.
skip_square (bool, optional): Whether to skip logging for square
img_scale. Defaults to False.
Returns:
bool: Whether to have done logging.
"""
if shape_order == 'hw':
height, width = img_scale
elif shape_order == 'wh':
width, height = img_scale
else:
raise ValueError(f'Invalid shape_order {shape_order}.')
if skip_square and (height == width):
return False
logger = get_root_logger()
caller = get_caller_name()
logger.info(f'image shape: height={height}, width={width} in {caller}')
return True
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available, is_npu_support_full_precision
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_torch_npu_optimizers() -> List[str]:
"""Register optimizers in ``torch npu`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
if not is_npu_available():
return []
import torch_npu
if not hasattr(torch_npu, 'optim'):
return []
torch_npu_optimizers = []
for module_name in dir(torch_npu.optim):
if module_name.startswith('__') or module_name in OPTIMIZERS:
continue
_optim = getattr(torch_npu.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_npu_optimizers.append(module_name)
return torch_npu_optimizers
NPU_OPTIMIZERS = register_torch_npu_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def register_lion_optimizers() -> List[str]:
"""Register Lion optimizer to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
optimizers = []
try:
from lion_pytorch import Lion
except ImportError:
pass
else:
OPTIMIZERS.register_module(module=Lion)
optimizers.append('Lion')
return optimizers
LION_OPTIMIZERS = register_lion_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # The current generation of NPUs (Ascend 910) only supports mixed-precision
    # training, so fall back to AmpOptimWrapper whenever full precision is
    # unavailable to keep training working.
if is_npu_available() and not is_npu_support_full_precision():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def register_torch_npu_optimizers() -> List[str]:
"""Register optimizers in ``torch npu`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
if not is_npu_available():
return []
import torch_npu
if not hasattr(torch_npu, 'optim'):
return []
torch_npu_optimizers = []
for module_name in dir(torch_npu.optim):
if module_name.startswith('__') or module_name in OPTIMIZERS:
continue
_optim = getattr(torch_npu.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_npu_optimizers.append(module_name)
return torch_npu_optimizers
NPU_OPTIMIZERS = register_torch_npu_optimizers()
def register_dadaptation_optimizers() -> List[str]:
"""Register optimizers in ``dadaptation`` to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
dadaptation_optimizers = []
try:
import dadaptation
except ImportError:
pass
else:
for module_name in ['DAdaptAdaGrad', 'DAdaptAdam', 'DAdaptSGD']:
_optim = getattr(dadaptation, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
dadaptation_optimizers.append(module_name)
return dadaptation_optimizers
DADAPTATION_OPTIMIZERS = register_dadaptation_optimizers()
def register_lion_optimizers() -> List[str]:
"""Register Lion optimizer to the ``OPTIMIZERS`` registry.
Returns:
List[str]: A list of registered optimizers' name.
"""
optimizers = []
try:
from lion_pytorch import Lion
except ImportError:
pass
else:
OPTIMIZERS.register_module(module=Lion)
optimizers.append('Lion')
return optimizers
LION_OPTIMIZERS = register_lion_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
If ``constructor`` is set in the ``cfg``, this method will build an
optimizer wrapper constructor, and use optimizer wrapper constructor to
build the optimizer wrapper. If ``constructor`` is not set, the
``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # The current generation of NPUs (Ascend 910) only supports mixed-precision
    # training, so mixed precision is turned on by default on the NPU to keep
    # training working.
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_gif,
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
from typing import Any, Dict, Iterator
import torch
from ..utils import _log_api_usage_once
try:
from ._load_gpu_decoder import _HAS_GPU_VIDEO_DECODER
except ModuleNotFoundError:
_HAS_GPU_VIDEO_DECODER = False
from ._video_opt import (
_HAS_VIDEO_OPT,
_probe_video_from_file,
_probe_video_from_memory,
_read_video_from_file,
_read_video_from_memory,
_read_video_timestamps_from_file,
_read_video_timestamps_from_memory,
Timebase,
VideoMetaData,
)
from .image import (
decode_image,
decode_jpeg,
decode_png,
encode_jpeg,
encode_png,
ImageReadMode,
read_file,
read_image,
write_file,
write_jpeg,
write_png,
)
from .video import read_video, read_video_timestamps, write_video
from .video_reader import VideoReader
__all__ = [
"write_video",
"read_video",
"read_video_timestamps",
"_read_video_from_file",
"_read_video_timestamps_from_file",
"_probe_video_from_file",
"_read_video_from_memory",
"_read_video_timestamps_from_memory",
"_probe_video_from_memory",
"_HAS_VIDEO_OPT",
"_HAS_GPU_VIDEO_DECODER",
"_read_video_clip_from_memory",
"_read_video_meta_data",
"VideoMetaData",
"Timebase",
"ImageReadMode",
"decode_image",
"decode_jpeg",
"decode_png",
"encode_jpeg",
"encode_png",
"read_file",
"read_image",
"write_file",
"write_jpeg",
"write_png",
"Video",
"VideoReader",
]
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate
from .autoencoders.vq_model import VQEncoderOutput, VQModel
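# Thin shims: subclass the relocated classes so old import paths keep working while emitting a deprecation warning.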
class VQEncoderOutput(VQEncoderOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `VQEncoderOutput` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQEncoderOutput`, instead."
deprecate("VQEncoderOutput", "0.31", deprecation_message)
super().__init__(*args, **kwargs)
class VQModel(VQModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `VQModel` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQModel`, instead."
deprecate("VQModel", "0.31", deprecation_message)
super().__init__(*args, **kwargs)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils import deprecate
from .autoencoders.vq_model import VQEncoderOutput, VQModel
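# Thin shims: subclass the relocated classes so old import paths keep working while emitting a deprecation warning.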
class VQEncoderOutput(VQEncoderOutput):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `VQEncoderOutput` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQEncoderOutput`, instead."
deprecate("VQEncoderOutput", "0.31", deprecation_message)
super().__init__(*args, **kwargs)
class VQModel(VQModel):
def __init__(self, *args, **kwargs):
deprecation_message = "Importing `VQModel` from `diffusers.models.vq_model` is deprecated and this will be removed in a future version. Please use `from diffusers.models.autoencoders.vq_model import VQModel`, instead."
deprecate("VQModel", "0.31", deprecation_message)
super().__init__(*args, **kwargs)
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
# use caffe img_norm
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
norm_cfg=dict(requires_grad=False),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
# TODO: remove once pytorch/pytorch#110436 is resolved
if sys.version_info < (3, 12, 0):
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
from ..utils import is_torch_available
if is_torch_available():
from .hooks import HookRegistry, ModelHook
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
from .pyramid_attention_broadcast import PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
|
from ..utils import is_torch_available
if is_torch_available():
from .layerwise_casting import apply_layerwise_casting, apply_layerwise_casting_hook
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
import torch.nn as nn
from mmengine.runner import load_checkpoint
from torch import Tensor
from mmdet.core import ConfigType, OptConfigType, SampleList
from mmdet.registry import MODELS
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
teacher_backbone: ConfigType,
teacher_neck: ConfigType,
teacher_bbox_head: ConfigType,
teacher_ckpt: Optional[str] = None,
eval_teacher: bool = True,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
preprocess_cfg: OptConfigType = None) -> None:
super(KnowledgeDistillationSingleStageDetector, self).__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
preprocess_cfg=preprocess_cfg)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self) -> bool:
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, batch_inputs: Tensor) -> Tensor:
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(batch_inputs)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self, batch_inputs: Tensor,
batch_data_samples: SampleList, **kwargs) -> dict:
"""
Args:
batch_inputs (Tensor): Input images of shape (N, C, H, W).
These should usually be mean centered and std scaled.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
batch_gt_instances = []
batch_gt_instances_ignore = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
if 'ignored_instances' in data_sample:
batch_gt_instances_ignore.append(data_sample.ignored_instances)
else:
batch_gt_instances_ignore.append(None)
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(batch_inputs)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, batch_gt_instances, batch_img_metas,
batch_gt_instances_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(batch_inputs)
losses = self.bbox_head.forward_train(x, label_assignment_results,
batch_gt_instances,
batch_img_metas,
batch_gt_instances_ignore)
return losses
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmdet.registry import MODELS
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@MODELS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = MODELS.build(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = MODELS.build(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = MODELS.build(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# the student use the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
|
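For context, the LAD detector above is normally assembled from a registry config rather than constructed directly. A hypothetical, minimal mmdet-style config fragment (all types, channel sizes, and the checkpoint path below are illustrative placeholders, not taken from this file):

model = dict(
    type='LAD',
    backbone=dict(type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3)),
    neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    bbox_head=dict(type='PAAHead', num_classes=80, in_channels=256),
    # Teacher branch: built as plain submodules of teacher_model in __init__ above.
    teacher_backbone=dict(type='ResNet', depth=101, num_stages=4, out_indices=(0, 1, 2, 3)),
    teacher_neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
    teacher_bbox_head=dict(type='PAAHead', num_classes=80, in_channels=256),
    teacher_ckpt='path/to/teacher.pth',  # placeholder; loaded on CPU via load_checkpoint
    eval_teacher=True,
)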
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import EmptyCacheHook
class TestEmptyCacheHook:
def test_emtpy_cache_hook(self):
Hook = EmptyCacheHook(True, True, True)
Runner = Mock()
Hook._after_iter(Runner)
Hook._before_epoch(Runner)
Hook._after_epoch(Runner)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
from mmengine.hooks import EmptyCacheHook
class TestEmptyCacheHook:
def test_emtpy_cache_hook(self):
Hook = EmptyCacheHook(True, True, True)
Runner = Mock()
Hook.after_iter(Runner)
Hook.before_epoch(Runner)
Hook.after_epoch(Runner)
|
from typing import Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import ( # noqa
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.embedding.jax_array import JaxArrayEmbedding
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.embedding.torch import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
T = TypeVar("T", bound="AnyEmbedding")
class AnyEmbedding(AnyTensor, EmbeddingMixin):
"""
    Represents an embedding tensor object that can be used with TensorFlow, PyTorch, and NumPy tensor types.
---
    ```python
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
class MyEmbeddingDoc(BaseDoc):
embedding: AnyEmbedding
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyEmbeddingDoc(embedding=tf.zeros(1000, 2))
type(doc.embedding) # TensorFlowEmbedding
# Example usage with PyTorch:
import torch
doc = MyEmbeddingDoc(embedding=torch.zeros(1000, 2))
type(doc.embedding) # TorchEmbedding
# Example usage with NumPy:
import numpy as np
doc = MyEmbeddingDoc(embedding=np.zeros((1000, 2)))
type(doc.embedding) # NdArrayEmbedding
    ```
---
Raises:
TypeError: If the type of the value is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray]
"""
@classmethod
def _docarray_validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(TorchEmbedding, value)
elif isinstance(value, torch.Tensor):
return TorchEmbedding._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(TensorFlowEmbedding, value)
elif isinstance(value, tf.Tensor):
return TensorFlowEmbedding._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(JaxArrayEmbedding, value)
elif isinstance(value, jnp.ndarray):
return JaxArrayEmbedding._docarray_from_native(value) # noqa
try:
return NdArrayEmbedding._docarray_validate(value)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
from typing import TYPE_CHECKING, Any, Type, TypeVar, Union, cast
import numpy as np
from docarray.typing.tensor.embedding.embedding_mixin import EmbeddingMixin
from docarray.typing.tensor.embedding.ndarray import NdArrayEmbedding
from docarray.typing.tensor.tensor import AnyTensor
from docarray.utils._internal.misc import ( # noqa
is_jax_available,
is_tf_available,
is_torch_available,
)
jax_available = is_jax_available()
if jax_available:
import jax.numpy as jnp # type: ignore
from docarray.typing.tensor.embedding.jax_array import JaxArrayEmbedding
from docarray.typing.tensor.jaxarray import JaxArray # noqa: F401
torch_available = is_torch_available()
if torch_available:
import torch
from docarray.typing.tensor.embedding.torch import TorchEmbedding
from docarray.typing.tensor.torch_tensor import TorchTensor # noqa: F401
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf # type: ignore
from docarray.typing.tensor.embedding.tensorflow import TensorFlowEmbedding
from docarray.typing.tensor.tensorflow_tensor import TensorFlowTensor # noqa: F401
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar("T", bound="AnyEmbedding")
class AnyEmbedding(AnyTensor, EmbeddingMixin):
"""
    Represents an embedding tensor object that can be used with TensorFlow, PyTorch, and NumPy tensor types.
---
    ```python
from docarray import BaseDoc
from docarray.typing import AnyEmbedding
class MyEmbeddingDoc(BaseDoc):
embedding: AnyEmbedding
# Example usage with TensorFlow:
import tensorflow as tf
doc = MyEmbeddingDoc(embedding=tf.zeros(1000, 2))
type(doc.embedding) # TensorFlowEmbedding
# Example usage with PyTorch:
import torch
doc = MyEmbeddingDoc(embedding=torch.zeros(1000, 2))
type(doc.embedding) # TorchEmbedding
# Example usage with NumPy:
import numpy as np
doc = MyEmbeddingDoc(embedding=np.zeros((1000, 2)))
type(doc.embedding) # NdArrayEmbedding
    ```
---
Raises:
TypeError: If the type of the value is not one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray]
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, Any],
field: "ModelField",
config: "BaseConfig",
):
if torch_available:
if isinstance(value, TorchTensor):
return cast(TorchEmbedding, value)
elif isinstance(value, torch.Tensor):
return TorchEmbedding._docarray_from_native(value) # noqa
if tf_available:
if isinstance(value, TensorFlowTensor):
return cast(TensorFlowEmbedding, value)
elif isinstance(value, tf.Tensor):
return TensorFlowEmbedding._docarray_from_native(value) # noqa
if jax_available:
if isinstance(value, JaxArray):
return cast(JaxArrayEmbedding, value)
elif isinstance(value, jnp.ndarray):
return JaxArrayEmbedding._docarray_from_native(value) # noqa
try:
return NdArrayEmbedding.validate(value, field, config)
except Exception: # noqa
pass
raise TypeError(
f"Expected one of [torch.Tensor, tensorflow.Tensor, numpy.ndarray] "
f"compatible type, got {type(value)}"
)
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', backend_args=_base_.backend_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
    batch_size=4,  # total batch size 32 = 8 GPUs x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
_base_ = [
'mmdet::_base_/models/mask-rcnn_r50_fpn.py',
'mmdet::_base_/datasets/coco_instance.py',
'mmdet::_base_/schedules/schedule_1x.py',
'mmdet::_base_/default_runtime.py'
]
# please install the mmclassification dev-1.x branch
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth' # noqa
image_size = (1024, 1024)
model = dict(
backbone=dict(
_delete_=True,
type='mmcls.ConvNeXt',
arch='base',
out_indices=[0, 1, 2, 3],
# TODO: verify stochastic depth rate {0.1, 0.2, 0.3, 0.4}
drop_path_rate=0.4,
layer_scale_init_value=0., # disable layer scale when using GRN
gap_before_final_norm=False,
use_grn=True, # V2 uses GRN
init_cfg=dict(
type='Pretrained', checkpoint=checkpoint_file,
prefix='backbone.')),
neck=dict(in_channels=[128, 256, 512, 1024]),
test_cfg=dict(
rpn=dict(nms=dict(type='nms')), # TODO: does RPN use soft_nms?
rcnn=dict(nms=dict(type='soft_nms'))))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(
    batch_size=4,  # total batch size 32 = 8 GPUs x 4 images
num_workers=8,
dataset=dict(pipeline=train_pipeline))
max_epochs = 36
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.001, by_epoch=False, begin=0,
end=1000),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[27, 33],
gamma=0.1)
]
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
constructor='LearningRateDecayOptimizerConstructor',
paramwise_cfg={
'decay_rate': 0.95,
'decay_type': 'layer_wise', # TODO: sweep layer-wise lr decay?
'num_layers': 12
},
optimizer=dict(
_delete_=True,
type='AdamW',
lr=0.0001,
betas=(0.9, 0.999),
weight_decay=0.05,
))
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1))
|
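To make the layer-wise decay in paramwise_cfg above concrete: each layer's learning rate is typically the base lr scaled by decay_rate raised to its distance from the output. A rough standalone illustration (the exact layer-id assignment is decided by LearningRateDecayOptimizerConstructor and may differ in detail):

base_lr, decay_rate, num_layers = 1e-4, 0.95, 12

# Earlier (deeper-in-the-stack) layers get smaller learning rates; the head keeps base_lr.
for layer_id in range(num_layers + 1):
    scale = decay_rate ** (num_layers - layer_id)
    print(f"layer {layer_id:2d}: lr = {base_lr * scale:.2e}")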
from typing import Any, Dict
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor, TorchTensor
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if-else statement needs to be refactored; it is too long
# the check should be delegated to the type level
if content_type == 'tensor':
fields[field] = Tensor._read_from_proto(value.tensor)
elif content_type == 'torch_tensor':
fields[field] = TorchTensor._read_from_proto(value.torch_tensor)
elif content_type == 'embedding':
fields[field] = Embedding._read_from_proto(value.embedding)
elif content_type == 'any_url':
fields[field] = parse_obj_as(AnyUrl, value.any_url)
elif content_type == 'image_url':
fields[field] = parse_obj_as(ImageUrl, value.image_url)
elif content_type == 'id':
fields[field] = parse_obj_as(ID, value.id)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import Any, Dict
from pydantic.tools import parse_obj_as
from docarray.document.abstract_document import AbstractDocument
from docarray.document.base_node import BaseNode
from docarray.proto import DocumentProto, NodeProto
from docarray.typing import ID, AnyUrl, Embedding, ImageUrl, Tensor
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
            # this if-else statement needs to be refactored; it is too long
# the check should be delegated to the type level
if content_type == 'tensor':
fields[field] = Tensor._read_from_proto(value.tensor)
elif content_type == 'embedding':
fields[field] = Embedding._read_from_proto(value.embedding)
elif content_type == 'any_url':
fields[field] = parse_obj_as(AnyUrl, value.any_url)
elif content_type == 'image_url':
fields[field] = parse_obj_as(ImageUrl, value.image_url)
elif content_type == 'id':
fields[field] = parse_obj_as(ID, value.id)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_node_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
(
f'Field `{field}` contains cyclic reference in memory. '
'Could it be your Document is referring to itself?'
),
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_node_protobuf(self) -> NodeProto:
"""Convert Document into a NodeProto protobuf message. This function should be
called when the Document is nest into another Document that need to be
converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
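A hedged round-trip sketch for the ProtoMixin above; the MyDoc schema and the docarray import path here are illustrative assumptions, not part of this file:

from docarray import Document  # assumed base class for this sketch

class MyDoc(Document):  # hypothetical schema with a single text field
    text: str

doc = MyDoc(text='hello')
pb_msg = doc.to_protobuf()              # each field becomes a NodeProto inside DocumentProto.data
restored = MyDoc.from_protobuf(pb_msg)  # 'text' is handled by the content_type == 'text' branch
assert restored.text == doc.text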
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
        list[dict]: The new pipeline list that keeps only the
            image- and annotation-loading related configuration.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
        # TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.transforms import LoadImageFromFile
from mmdet.datasets.transforms import LoadAnnotations, LoadPanopticAnnotations
from mmdet.registry import TRANSFORMS
def get_loading_pipeline(pipeline):
"""Only keep loading image and annotations related configuration.
Args:
pipeline (list[dict]): Data pipeline configs.
Returns:
        list[dict]: The new pipeline list that keeps only the
            image- and annotation-loading related configuration.
Examples:
>>> pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True),
... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
... dict(type='RandomFlip', flip_ratio=0.5),
... dict(type='Normalize', **img_norm_cfg),
... dict(type='Pad', size_divisor=32),
... dict(type='DefaultFormatBundle'),
... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
... ]
>>> expected_pipelines = [
... dict(type='LoadImageFromFile'),
... dict(type='LoadAnnotations', with_bbox=True)
... ]
>>> assert expected_pipelines ==\
... get_loading_pipeline(pipelines)
"""
loading_pipeline_cfg = []
for cfg in pipeline:
obj_cls = TRANSFORMS.get(cfg['type'])
        # TODO: use a more elegant way to distinguish loading modules
if obj_cls is not None and obj_cls in (LoadImageFromFile,
LoadAnnotations,
LoadPanopticAnnotations):
loading_pipeline_cfg.append(cfg)
assert len(loading_pipeline_cfg) == 2, \
'The data pipeline in your config file must include ' \
'loading image and annotations related pipeline.'
return loading_pipeline_cfg
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
cleaned_up = False
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
@abstractmethod
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
signal.signal(signal.SIGINT, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
finally:
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
logger.info(f"[{self.service_name}] Terminated.")
def _self_terminate(self, signum: int, frame):
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
logger.info(f"[{self.service_name}] started with PID {self.process.pid}")
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
logger.info(f"[{self.service_name}] with PID {self.process.pid} stopped")
self.process = None
|
import logging
import os
import signal
import sys
from abc import ABC, abstractmethod
from multiprocessing import Process, set_start_method
from typing import Optional
from backend.util.logging import configure_logging
from backend.util.metrics import sentry_init
logger = logging.getLogger(__name__)
_SERVICE_NAME = "MainProcess"
def get_service_name():
return _SERVICE_NAME
def set_service_name(name: str):
global _SERVICE_NAME
_SERVICE_NAME = name
class AppProcess(ABC):
"""
A class to represent an object that can be executed in a background process.
"""
process: Optional[Process] = None
cleaned_up = False
set_start_method("spawn", force=True)
configure_logging()
sentry_init()
# Methods that are executed INSIDE the process #
@abstractmethod
def run(self):
"""
The method that will be executed in the process.
"""
pass
@classmethod
@property
def service_name(cls) -> str:
return cls.__name__
def cleanup(self):
"""
Implement this method on a subclass to do post-execution cleanup,
e.g. disconnecting from a database or terminating child processes.
"""
pass
def health_check(self) -> str:
"""
A method to check the health of the process.
"""
return "OK"
def execute_run_command(self, silent):
signal.signal(signal.SIGTERM, self._self_terminate)
signal.signal(signal.SIGINT, self._self_terminate)
try:
if silent:
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
set_service_name(self.service_name)
logger.info(f"[{self.service_name}] Starting...")
self.run()
except (KeyboardInterrupt, SystemExit) as e:
logger.warning(f"[{self.service_name}] Terminated: {e}; quitting...")
finally:
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
logger.info(f"[{self.service_name}] Terminated.")
def _self_terminate(self, signum: int, frame):
if not self.cleaned_up:
self.cleanup()
self.cleaned_up = True
sys.exit(0)
# Methods that are executed OUTSIDE the process #
def __enter__(self):
self.start(background=True)
return self
def __exit__(self, *args, **kwargs):
self.stop()
def start(self, background: bool = False, silent: bool = False, **proc_args) -> int:
"""
Start the background process.
Args:
background: Whether to run the process in the background.
silent: Whether to disable stdout and stderr.
proc_args: Additional arguments to pass to the process.
Returns:
the process id or 0 if the process is not running in the background.
"""
if not background:
self.execute_run_command(silent)
return 0
self.process = Process(
name=self.__class__.__name__,
target=self.execute_run_command,
args=(silent,),
**proc_args,
)
self.process.start()
self.health_check()
logger.info(f"[{self.service_name}] started with PID {self.process.pid}")
return self.process.pid or 0
def stop(self):
"""
Stop the background process.
"""
if not self.process:
return
self.process.terminate()
self.process.join()
logger.info(f"[{self.service_name}] with PID {self.process.pid} stopped")
self.process = None
|
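A minimal usage sketch for AppProcess, assuming a subclass provides run() and cleanup(); the module path in the import is an assumption for this snippet:

import time

from backend.util.process import AppProcess  # assumed import path

class EchoService(AppProcess):  # hypothetical subclass for illustration
    def run(self):
        # Long-running work; SIGTERM/SIGINT are routed to _self_terminate -> cleanup().
        while True:
            time.sleep(1)

    def cleanup(self):
        pass  # e.g. close connections, terminate child processes

if __name__ == "__main__":
    # __enter__ starts the process in the background; __exit__ stops it.
    with EchoService() as service:
        print(f"running as {service.service_name}")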
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from torchvision.prototype.tv_tensors import Label
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
import pathlib
from typing import Any, Dict, List, Tuple, Union
from torchdata.datapipes.iter import Filter, IterDataPipe, Mapper
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, EncodedImage, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import (
hint_sharding,
hint_shuffling,
path_comparator,
read_categories_file,
)
from .._api import register_dataset, register_info
NAME = "country211"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=read_categories_file(NAME))
@register_dataset(NAME)
class Country211(Dataset):
"""
- **homepage**: https://github.com/openai/CLIP/blob/main/data/country211.md
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "val", "test"))
self._split_folder_name = "valid" if split == "val" else split
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check)
def _resources(self) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self._categories),
path=path,
image=EncodedImage.from_file(buffer),
)
def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
return pathlib.Path(data[0]).parent.parent.name == split
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._split_folder_name))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 31_650,
"val": 10_550,
"test": 21_100,
}[self._split]
def _generate_categories(self) -> List[str]:
resources = self._resources()
dp = resources[0].load(self._root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
|
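For reference, datasets registered with register_dataset were meant to be consumed through the experimental prototype API; a sketch (this API was unstable and may have changed or been removed since):

from torchvision.prototype import datasets  # experimental at the time

dataset = datasets.load("country211", split="val")  # 'val' maps to the on-disk 'valid' folder
for sample in dataset:
    print(sample["path"], sample["label"])
    break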
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
# Disable yapf because it conflicts with isort.
# yapf: disable
from .misc import (align_tensor, aligned_bilinear, center_of_mass,
empty_instances, filter_gt_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
images_to_levels, interpolate_as, levels_to_images,
mask2ndarray, multi_apply, relative_coordinate_maps,
rename_loss_dict, reweight_loss_dict,
samplelist_boxtype2tensor, select_single_mlvl,
sigmoid_geometric_mean, unfold_wo_center, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
from .wbf import weighted_boxes_fusion
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer', 'align_tensor', 'weighted_boxes_fusion'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .gaussian_target import (gather_feat, gaussian_radius,
gen_gaussian_target, get_local_maximum,
get_topk_from_heatmap, transpose_and_gather_feat)
from .image import imrenormalize
from .make_divisible import make_divisible
# Disable yapf because it conflicts with isort.
# yapf: disable
from .misc import (align_tensor, aligned_bilinear, center_of_mass,
empty_instances, filter_gt_instances,
filter_scores_and_topk, flip_tensor, generate_coordinate,
images_to_levels, interpolate_as, levels_to_images,
mask2ndarray, multi_apply, relative_coordinate_maps,
rename_loss_dict, reweight_loss_dict,
samplelist_boxtype2tensor, select_single_mlvl,
sigmoid_geometric_mean, unfold_wo_center, unmap,
unpack_gt_instances)
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .vlfuse_helper import BertEncoderLayer, VLFuse, permute_and_flatten
__all__ = [
'gaussian_radius', 'gen_gaussian_target', 'make_divisible',
'get_local_maximum', 'get_topk_from_heatmap', 'transpose_and_gather_feat',
'interpolate_as', 'sigmoid_geometric_mean', 'gather_feat',
'preprocess_panoptic_gt', 'get_uncertain_point_coords_with_randomness',
'get_uncertainty', 'unpack_gt_instances', 'empty_instances',
'center_of_mass', 'filter_scores_and_topk', 'flip_tensor',
'generate_coordinate', 'levels_to_images', 'mask2ndarray', 'multi_apply',
'select_single_mlvl', 'unmap', 'images_to_levels',
'samplelist_boxtype2tensor', 'filter_gt_instances', 'rename_loss_dict',
'reweight_loss_dict', 'relative_coordinate_maps', 'aligned_bilinear',
'unfold_wo_center', 'imrenormalize', 'VLFuse', 'permute_and_flatten',
'BertEncoderLayer', 'align_tensor'
]
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
stacklevel=5,
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
"""Question-answering with sources over a vector database."""
import warnings
from typing import Any
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.vectorstores import VectorStore
from pydantic import Field, model_validator
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
"""Question-answering with sources over a vector database."""
vectorstore: VectorStore = Field(exclude=True)
"""Vector Database to connect to."""
k: int = 4
"""Number of results to return from store"""
reduce_k_below_max_tokens: bool = False
"""Reduce the number of results to return from store based on tokens limit"""
max_tokens_limit: int = 3375
"""Restrict the docs to return from store based on tokens,
enforced only for StuffDocumentChain and if reduce_k_below_max_tokens is to true"""
search_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Extra search args."""
def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
num_docs = len(docs)
if self.reduce_k_below_max_tokens and isinstance(
self.combine_documents_chain,
StuffDocumentsChain,
):
tokens = [
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
for doc in docs
]
token_count = sum(tokens[:num_docs])
while token_count > self.max_tokens_limit:
num_docs -= 1
token_count -= tokens[num_docs]
return docs[:num_docs]
def _get_docs(
self,
inputs: dict[str, Any],
*,
run_manager: CallbackManagerForChainRun,
) -> list[Document]:
question = inputs[self.question_key]
docs = self.vectorstore.similarity_search(
question,
k=self.k,
**self.search_kwargs,
)
return self._reduce_tokens_below_limit(docs)
async def _aget_docs(
self,
inputs: dict[str, Any],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> list[Document]:
msg = "VectorDBQAWithSourcesChain does not support async"
raise NotImplementedError(msg)
@model_validator(mode="before")
@classmethod
def raise_deprecation(cls, values: dict) -> Any:
warnings.warn(
"`VectorDBQAWithSourcesChain` is deprecated - "
"please use `from langchain.chains import RetrievalQAWithSourcesChain`",
stacklevel=2,
)
return values
@property
def _chain_type(self) -> str:
return "vector_db_qa_with_sources_chain"
|
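To illustrate _reduce_tokens_below_limit above with concrete numbers: documents are dropped from the end of the result list until the running token count fits under max_tokens_limit. A standalone re-run of the same loop with hypothetical counts:

docs = ["doc-a", "doc-b", "doc-c"]   # stand-ins for Document objects
tokens = [1500, 1200, 1000]          # hypothetical per-doc token counts
max_tokens_limit = 3000

num_docs = len(docs)
token_count = sum(tokens[:num_docs])  # 3700 > 3000, so trim from the end
while token_count > max_tokens_limit:
    num_docs -= 1
    token_count -= tokens[num_docs]
print(docs[:num_docs])                # ['doc-a', 'doc-b'] at 2700 tokens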
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.registry import MODELS
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDETR(TestCase):
def setUp(self) -> None:
register_all_modules()
def test_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
config = get_detector_cfg('detr/detr_r50_8xb2-150e_coco.py')
model = MODELS.build(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                    'there should be no box loss when there are no ground truth boxes')
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image, batch_data_samples=batch_data_samples2, rescale=True)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import InstanceData
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing import get_detector_cfg
from mmdet.utils import register_all_modules
class TestDETR(TestCase):
def setUp(self) -> None:
register_all_modules()
def test_detr_head_loss(self):
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
metainfo = {
'img_shape': (s, s),
'scale_factor': (1, 1),
'pad_shape': (s, s),
'batch_input_shape': (s, s)
}
img_metas = DetDataSample()
img_metas.set_metainfo(metainfo)
batch_data_samples = []
batch_data_samples.append(img_metas)
config = get_detector_cfg('detr/detr_r50_8xb2-150e_coco.py')
model = build_detector(config)
model.init_weights()
random_image = torch.rand(1, 3, s, s)
# Test that empty ground truth encourages the network to
# predict background
gt_instances = InstanceData()
gt_instances.bboxes = torch.empty((0, 4))
gt_instances.labels = torch.LongTensor([])
img_metas.gt_instances = gt_instances
batch_data_samples1 = []
batch_data_samples1.append(img_metas)
empty_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples1)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
self.assertGreater(loss.item(), 0,
'cls loss should be non-zero')
elif 'bbox' in key:
self.assertEqual(
loss.item(), 0,
                    'there should be no box loss when there are no ground truth boxes')
elif 'iou' in key:
self.assertEqual(
loss.item(), 0,
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_instances = InstanceData()
gt_instances.bboxes = torch.Tensor(
[[23.6667, 23.8757, 238.6326, 151.8874]])
gt_instances.labels = torch.LongTensor([2])
img_metas.gt_instances = gt_instances
batch_data_samples2 = []
batch_data_samples2.append(img_metas)
one_gt_losses = model.loss(
random_image, batch_data_samples=batch_data_samples2)
for loss in one_gt_losses.values():
self.assertGreater(
loss.item(), 0,
'cls loss, or box loss, or iou loss should be non-zero')
model.eval()
# test _forward
model._forward(random_image, batch_data_samples=batch_data_samples2)
# test only predict
model.predict(
random_image, batch_data_samples=batch_data_samples2, rescale=True)
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the SNLI + MultiNLI (AllNLI) dataset
with a softmax loss function. Every 100 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
import logging
import sys
import traceback
from datetime import datetime
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
# Set the log level to INFO to get more information
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# You can specify any Hugging Face pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
train_batch_size = 16
output_dir = "output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# 1. Here we define our SentenceTransformer model. If not already a Sentence Transformer model, it will automatically
# create one with "mean" pooling.
model = SentenceTransformer(model_name)
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
# We'll start with 10k training samples, but you can increase this to get a stronger model
logging.info("Read AllNLI train dataset")
train_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="train").select(range(10000))
eval_dataset = load_dataset("sentence-transformers/all-nli", "pair-class", split="dev").select(range(1000))
logging.info(train_dataset)
# 3. Define our training loss: https://sbert.net/docs/package_reference/sentence_transformer/losses.html#softmaxloss
train_loss = losses.SoftmaxLoss(
model=model,
sentence_embedding_dimension=model.get_sentence_embedding_dimension(),
num_labels=3,
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
dev_evaluator = EmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-dev",
)
logging.info("Evaluation before training:")
dev_evaluator(model)
# 5. Define the training arguments
args = SentenceTransformerTrainingArguments(
# Required parameter:
output_dir=output_dir,
# Optional training parameters:
num_train_epochs=1,
per_device_train_batch_size=train_batch_size,
per_device_eval_batch_size=train_batch_size,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
# Optional tracking/debugging parameters:
eval_strategy="steps",
eval_steps=100,
save_strategy="steps",
save_steps=100,
save_total_limit=2,
logging_steps=100,
run_name="nli-v1", # Will be used in W&B if `wandb` is installed
)
# 6. Create the trainer & start training
trainer = SentenceTransformerTrainer(
model=model,
args=args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=train_loss,
evaluator=dev_evaluator,
)
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
test_evaluator = EmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name="sts-test",
)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
final_output_dir = f"{output_dir}/final"
model.save(final_output_dir)
# 9. (Optional) save the model to the Hugging Face Hub!
# It is recommended to run `huggingface-cli login` to log into your Hugging Face account first
model_name = model_name if "/" not in model_name else model_name.split("/")[-1]
try:
model.push_to_hub(f"{model_name}-nli-v1")
except Exception:
logging.error(
f"Error uploading model to the Hugging Face Hub:\n{traceback.format_exc()}To upload it manually, you can run "
f"`huggingface-cli login`, followed by loading the model using `model = SentenceTransformer({final_output_dir!r})` "
f"and saving it using `model.push_to_hub('{model_name}-nli-v1')`."
)
|
"""
The system trains BERT (or any other transformer model like RoBERTa, DistilBERT, etc.) on the SNLI + MultiNLI (AllNLI) dataset
with a softmax loss function. Every 1000 training steps, the model is evaluated on the
STS benchmark dataset.
Usage:
python training_nli.py
OR
python training_nli.py pretrained_transformer_model_name
"""
from torch.utils.data import DataLoader
import math
from sentence_transformers import models, losses
from sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
import logging
from datetime import datetime
import sys
import os
import gzip
import csv
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
nli_dataset_path = "data/AllNLI.tsv.gz"
sts_dataset_path = "data/stsbenchmark.tsv.gz"
if not os.path.exists(nli_dataset_path):
util.http_get("https://sbert.net/datasets/AllNLI.tsv.gz", nli_dataset_path)
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base
model_name = sys.argv[1] if len(sys.argv) > 1 else "bert-base-uncased"
# Read the dataset
train_batch_size = 16
model_save_path = (
"output/training_nli_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True,
pooling_mode_cls_token=False,
pooling_mode_max_tokens=False,
)
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# Read the AllNLI.tsv.gz file and create the training dataset
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
with gzip.open(nli_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "train":
label_id = label2int[row["label"]]
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=label_id))
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.SoftmaxLoss(
model=model, sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=len(label2int)
)
# Read STSbenchmark dataset and use it as development set
logging.info("Read STSbenchmark dev dataset")
dev_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "dev":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
dev_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
dev_samples, batch_size=train_batch_size, name="sts-dev"
)
# Configure the training
num_epochs = 1
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
evaluator=dev_evaluator,
epochs=num_epochs,
evaluation_steps=1000,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##############################################################################
#
# Load the stored model and evaluate its performance on STS benchmark dataset
#
##############################################################################
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
if row["split"] == "test":
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
model = SentenceTransformer(model_save_path)
test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(
test_samples, batch_size=train_batch_size, name="sts-test"
)
test_evaluator(model, output_path=model_save_path)
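# Editor's sketch (not part of the original script): a quick qualitative check of
# the freshly trained model on an illustrative sentence pair.
emb = model.encode(["A man is eating food.", "A man eats a piece of bread."], convert_to_tensor=True)
logging.info("Cosine similarity: {:.4f}".format(util.pytorch_cos_sim(emb[0], emb[1]).item()))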
|
from __future__ import annotations
import json
import os
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
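if __name__ == "__main__":
    # Editor's sketch (an assumption about intended use, not from the original
    # module): append Dropout after pooling so dropout is applied to the pooled
    # sentence embedding during training.
    from sentence_transformers import SentenceTransformer, models

    word = models.Transformer("bert-base-uncased")
    pool = models.Pooling(word.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word, pool, Dropout(dropout=0.2)])
    print(model)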
|
import json
import os
from typing import Dict
from torch import Tensor, nn
class Dropout(nn.Module):
"""Dropout layer.
Args:
        dropout: Dropout probability applied to the sentence embedding.
"""
def __init__(self, dropout: float = 0.2):
super(Dropout, self).__init__()
self.dropout = dropout
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, features: Dict[str, Tensor]):
features.update({"sentence_embedding": self.dropout_layer(features["sentence_embedding"])})
return features
def save(self, output_path):
with open(os.path.join(output_path, "config.json"), "w") as fOut:
json.dump({"dropout": self.dropout}, fOut)
@staticmethod
def load(input_path):
with open(os.path.join(input_path, "config.json")) as fIn:
config = json.load(fIn)
model = Dropout(**config)
return model
|
from typing import Any, Dict
from backend.data.block import Block
from backend.util.request import Requests
from ._api import Color, CustomerDetails, OrderItem, Profile
class Slant3DBlockBase(Block):
"""Base block class for Slant3D API interactions"""
BASE_URL = "https://www.slant3dapi.com/api"
def _get_headers(self, api_key: str) -> Dict[str, str]:
return {"api-key": api_key, "Content-Type": "application/json"}
def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict:
url = f"{self.BASE_URL}/{endpoint}"
response = Requests().request(
method=method, url=url, headers=self._get_headers(api_key), **kwargs
)
if not response.ok:
error_msg = response.json().get("error", "Unknown error")
raise RuntimeError(f"API request failed: {error_msg}")
return response.json()
def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str:
response = self._make_request(
"GET",
"filament",
api_key,
params={"profile": profile.value, "color": color.value},
)
if profile == Profile.PLA:
color_tag = color.value
else:
color_tag = f"{profile.value.lower()}{color.value.capitalize()}"
valid_tags = [filament["colorTag"] for filament in response["filaments"]]
if color_tag not in valid_tags:
raise ValueError(
f"""Invalid color profile combination {color_tag}.
Valid colors for {profile.value} are:
{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
"""
)
return color_tag
def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str:
return self._check_valid_color(profile, color, api_key)
def _format_order_data(
self,
customer: CustomerDetails,
order_number: str,
items: list[OrderItem],
api_key: str,
) -> list[dict[str, Any]]:
"""Helper function to format order data for API requests"""
orders = []
for item in items:
order_data = {
"email": customer.email,
"phone": customer.phone,
"name": customer.name,
"orderNumber": order_number,
"filename": item.file_url,
"fileURL": item.file_url,
"bill_to_street_1": customer.address,
"bill_to_city": customer.city,
"bill_to_state": customer.state,
"bill_to_zip": customer.zip,
"bill_to_country_as_iso": customer.country_iso,
"bill_to_is_US_residential": str(customer.is_residential).lower(),
"ship_to_name": customer.name,
"ship_to_street_1": customer.address,
"ship_to_city": customer.city,
"ship_to_state": customer.state,
"ship_to_zip": customer.zip,
"ship_to_country_as_iso": customer.country_iso,
"ship_to_is_US_residential": str(customer.is_residential).lower(),
"order_item_name": item.file_url,
"order_quantity": item.quantity,
"order_image_url": "",
"order_sku": "NOT_USED",
"order_item_color": self._convert_to_color(
item.profile, item.color, api_key
),
"profile": item.profile.value,
}
orders.append(order_data)
return orders
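# Editor's sketch: a hypothetical subclass showing how the helpers above compose.
# The "order" endpoint name and the list-shaped payload are assumptions, not a
# confirmed part of the Slant3D API.
class ExampleOrderBlock(Slant3DBlockBase):
    def submit_order(
        self,
        customer: CustomerDetails,
        order_number: str,
        items: list[OrderItem],
        api_key: str,
    ) -> Dict:
        orders = self._format_order_data(customer, order_number, items, api_key)
        return self._make_request("POST", "order", api_key, json=orders)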
|
from typing import Any, Dict
from backend.data.block import Block
from backend.util.request import requests
from ._api import Color, CustomerDetails, OrderItem, Profile
class Slant3DBlockBase(Block):
"""Base block class for Slant3D API interactions"""
BASE_URL = "https://www.slant3dapi.com/api"
def _get_headers(self, api_key: str) -> Dict[str, str]:
return {"api-key": api_key, "Content-Type": "application/json"}
def _make_request(self, method: str, endpoint: str, api_key: str, **kwargs) -> Dict:
url = f"{self.BASE_URL}/{endpoint}"
response = requests.request(
method=method, url=url, headers=self._get_headers(api_key), **kwargs
)
if not response.ok:
error_msg = response.json().get("error", "Unknown error")
raise RuntimeError(f"API request failed: {error_msg}")
return response.json()
def _check_valid_color(self, profile: Profile, color: Color, api_key: str) -> str:
response = self._make_request(
"GET",
"filament",
api_key,
params={"profile": profile.value, "color": color.value},
)
if profile == Profile.PLA:
color_tag = color.value
else:
color_tag = f"{profile.value.lower()}{color.value.capitalize()}"
valid_tags = [filament["colorTag"] for filament in response["filaments"]]
if color_tag not in valid_tags:
raise ValueError(
f"""Invalid color profile combination {color_tag}.
Valid colors for {profile.value} are:
{','.join([filament['colorTag'].replace(profile.value.lower(), '') for filament in response['filaments'] if filament['profile'] == profile.value])}
"""
)
return color_tag
def _convert_to_color(self, profile: Profile, color: Color, api_key: str) -> str:
return self._check_valid_color(profile, color, api_key)
def _format_order_data(
self,
customer: CustomerDetails,
order_number: str,
items: list[OrderItem],
api_key: str,
) -> list[dict[str, Any]]:
"""Helper function to format order data for API requests"""
orders = []
for item in items:
order_data = {
"email": customer.email,
"phone": customer.phone,
"name": customer.name,
"orderNumber": order_number,
"filename": item.file_url,
"fileURL": item.file_url,
"bill_to_street_1": customer.address,
"bill_to_city": customer.city,
"bill_to_state": customer.state,
"bill_to_zip": customer.zip,
"bill_to_country_as_iso": customer.country_iso,
"bill_to_is_US_residential": str(customer.is_residential).lower(),
"ship_to_name": customer.name,
"ship_to_street_1": customer.address,
"ship_to_city": customer.city,
"ship_to_state": customer.state,
"ship_to_zip": customer.zip,
"ship_to_country_as_iso": customer.country_iso,
"ship_to_is_US_residential": str(customer.is_residential).lower(),
"order_item_name": item.file_url,
"order_quantity": item.quantity,
"order_image_url": "",
"order_sku": "NOT_USED",
"order_item_color": self._convert_to_color(
item.profile, item.color, api_key
),
"profile": item.profile.value,
}
orders.append(order_data)
return orders
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import PointCloud3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
def test_point_cloud_np():
image = parse_obj_as(PointCloud3D, np.zeros((10, 10, 3)))
assert (image.tensor == np.zeros((10, 10, 3))).all()
def test_point_cloud_torch():
image = parse_obj_as(PointCloud3D, torch.zeros(10, 10, 3))
assert (image.tensor == torch.zeros(10, 10, 3)).all()
def test_point_cloud_shortcut_doc():
class MyDoc(BaseDocument):
image: PointCloud3D
image2: PointCloud3D
image3: PointCloud3D
doc = MyDoc(
image='http://myurl.ply',
image2=np.zeros((10, 10, 3)),
image3=torch.zeros(10, 10, 3),
)
assert doc.image.url == 'http://myurl.ply'
assert (doc.image2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.image3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
from docarray.documents import PointCloud3D
from tests import TOYDATA_DIR
LOCAL_OBJ_FILE = str(TOYDATA_DIR / 'tetrahedron.obj')
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_OBJ_FILE, REMOTE_OBJ_FILE])
def test_point_cloud(file_url):
print(f"file_url = {file_url}")
point_cloud = PointCloud3D(url=file_url)
point_cloud.tensor = point_cloud.url.load(samples=100)
assert isinstance(point_cloud.tensor, np.ndarray)
|
import argparse
import urllib
from http import HTTPStatus
from jina.enums import GatewayProtocolType
from jina.helper import parse_host_scheme
from jina.logging.predefined import default_logger
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.gateway import GatewayRuntime
from jina.serve.runtimes.worker import WorkerRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(
f'{hostname}:{port}',
protocol=GatewayProtocolType.from_string(protocol),
)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
        # exit with code 1 (failure) when execution reaches this point
exit(1)
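if __name__ == '__main__':
    # Editor's sketch (illustrative, not from the original file): `jina ping`
    # normally builds this namespace from the CLI; the values are placeholders.
    NetworkChecker(
        argparse.Namespace(
            target='flow',
            host='grpc://localhost:54321',
            attempts=3,
            min_successful_attempts=1,
            timeout=3000,
        )
    )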
|
import argparse
import urllib
from http import HTTPStatus
from jina.logging.predefined import default_logger
from jina.helper import parse_host_scheme
class NetworkChecker:
"""Check if a BaseDeployment is running or not."""
def __init__(self, args: 'argparse.Namespace'):
"""
Create a new :class:`NetworkChecker`.
:param args: args provided by the CLI.
"""
import time
from jina import Client
from jina.logging.profile import TimeContext
from jina.serve.runtimes.worker import WorkerRuntime
from jina.serve.runtimes.gateway import GatewayRuntime
try:
total_time = 0
total_success = 0
for j in range(args.attempts):
with TimeContext(
f'ping {args.target} on {args.host} at {j} round', default_logger
) as tc:
if args.target == 'executor':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = WorkerRuntime.is_ready(f'{hostname}:{port}')
elif args.target == 'gateway':
hostname, port, protocol, _ = parse_host_scheme(args.host)
r = GatewayRuntime.is_ready(f'{hostname}:{port}', protocol=protocol)
elif args.target == 'flow':
r = Client(host=args.host).is_flow_ready(timeout=args.timeout)
if not r:
default_logger.warning(
'not responding, attempt (%d/%d) in 1s'
% (j + 1, args.attempts)
)
else:
total_success += 1
total_time += tc.duration
if args.attempts > 0:
time.sleep(1)
if total_success < args.attempts:
default_logger.warning(
'message lost %.0f%% (%d/%d) '
% (
(1 - total_success / args.attempts) * 100,
args.attempts - total_success,
args.attempts,
)
)
if total_success > 0:
default_logger.info(
'avg. latency: %.0f ms' % (total_time / total_success * 1000)
)
if total_success >= args.min_successful_attempts:
default_logger.info(
f'readiness check succeeded {total_success} times!!!'
)
exit(0)
else:
default_logger.info(
f'readiness check succeeded {total_success} times, less than {args.min_successful_attempts}'
)
except KeyboardInterrupt:
pass
        # exit with code 1 (failure) when execution reaches this point
exit(1)
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './queryinst_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True,
rpn=None,
rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.pipelines import RNNTBundle
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
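    # Convert the segment and right-context lengths from feature frames to raw
    # audio samples, so the waveform can be sliced directly below.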
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis[0], lstrip=False)
print(transcript, end="", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0][0]))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
"""The demo script for testing the pre-trained Emformer RNNT pipelines.
Example:
python pipeline_demo.py --model-type librispeech --dataset-path ./datasets/librispeech
"""
import logging
import pathlib
from argparse import ArgumentParser, RawTextHelpFormatter
from dataclasses import dataclass
from functools import partial
from typing import Callable
import torch
import torchaudio
from common import MODEL_TYPE_LIBRISPEECH, MODEL_TYPE_MUSTC, MODEL_TYPE_TEDLIUM3
from mustc.dataset import MUSTC
from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH
from torchaudio.pipelines import RNNTBundle
from torchaudio.prototype.pipelines import EMFORMER_RNNT_BASE_MUSTC, EMFORMER_RNNT_BASE_TEDLIUM3
logger = logging.getLogger(__name__)
@dataclass
class Config:
dataset: Callable
bundle: RNNTBundle
_CONFIGS = {
MODEL_TYPE_LIBRISPEECH: Config(
partial(torchaudio.datasets.LIBRISPEECH, url="test-clean"),
EMFORMER_RNNT_BASE_LIBRISPEECH,
),
MODEL_TYPE_MUSTC: Config(
partial(MUSTC, subset="tst-COMMON"),
EMFORMER_RNNT_BASE_MUSTC,
),
MODEL_TYPE_TEDLIUM3: Config(
partial(torchaudio.datasets.TEDLIUM, release="release3", subset="test"),
EMFORMER_RNNT_BASE_TEDLIUM3,
),
}
def run_eval_streaming(args):
dataset = _CONFIGS[args.model_type].dataset(args.dataset_path)
bundle = _CONFIGS[args.model_type].bundle
decoder = bundle.get_decoder()
token_processor = bundle.get_token_processor()
feature_extractor = bundle.get_feature_extractor()
streaming_feature_extractor = bundle.get_streaming_feature_extractor()
hop_length = bundle.hop_length
num_samples_segment = bundle.segment_length * hop_length
num_samples_segment_right_context = num_samples_segment + bundle.right_context_length * hop_length
for idx in range(10):
sample = dataset[idx]
waveform = sample[0].squeeze()
# Streaming decode.
state, hypothesis = None, None
for idx in range(0, len(waveform), num_samples_segment):
segment = waveform[idx : idx + num_samples_segment_right_context]
segment = torch.nn.functional.pad(segment, (0, num_samples_segment_right_context - len(segment)))
with torch.no_grad():
features, length = streaming_feature_extractor(segment)
hypos, state = decoder.infer(features, length, 10, state=state, hypothesis=hypothesis)
hypothesis = hypos[0]
transcript = token_processor(hypothesis.tokens, lstrip=False)
print(transcript, end="", flush=True)
print()
# Non-streaming decode.
with torch.no_grad():
features, length = feature_extractor(waveform)
hypos = decoder(features, length, 10)
print(token_processor(hypos[0].tokens))
print()
def parse_args():
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--model-type", type=str, choices=_CONFIGS.keys(), required=True)
parser.add_argument(
"--dataset-path",
type=pathlib.Path,
help="Path to dataset.",
required=True,
)
parser.add_argument("--debug", action="store_true", help="whether to use debug level for logging")
return parser.parse_args()
def init_logger(debug):
fmt = "%(asctime)s %(message)s" if debug else "%(message)s"
level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(format=fmt, level=level, datefmt="%Y-%m-%d %H:%M:%S")
def cli_main():
args = parse_args()
init_logger(args.debug)
run_eval_streaming(args)
if __name__ == "__main__":
cli_main()
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
if version.parse(hfh.__version__) < version.parse("0.11.0"):
# old versions of hfh don't url-encode the file path
path = quote(path)
return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
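if __name__ == "__main__":
    # Editor's sketch: illustrative usage only; the repo id and file path are
    # made up. The quote() above matters for paths with spaces or special characters.
    print(hf_hub_url("user/my-dataset", "data/train file.csv", revision="main"))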
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
return hfh.hf_hub_url(repo_id, quote(path), repo_type="dataset", revision=revision)
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_scalar(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
from typing import Any, Optional, Sequence, Tuple, Union
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class IterTimerHook(Hook):
"""A hook that logs the time spent during iteration.
E.g. ``data_time`` for loading data and ``time`` for a model train step.
"""
priority = 'NORMAL'
def _before_epoch(self, runner, mode: str = 'train') -> None:
"""Record time flag before start a epoch.
Args:
runner (Runner): The runner of the training process.
mode (str): Current mode of runner. Defaults to 'train'.
"""
self.t = time.time()
def _before_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
mode: str = 'train') -> None:
"""Logging time for loading data and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/data_time',
time.time() - self.t)
def _after_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[Union[dict,
Sequence[BaseDataElement]]] = None,
mode: str = 'train') -> None:
"""Logging time for a iteration and update the time flag.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model. Defaults
to None.
mode (str): Current mode of runner. Defaults to 'train'.
"""
# TODO: update for new logging system
runner.message_hub.update_log(f'{mode}/time', time.time() - self.t)
self.t = time.time()
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optim_wrapper = dict(
optimizer=dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001))
# dataset settings
train_dataloader = dict(batch_size=1, num_workers=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
backbone=dict(
embed_dims=64,
num_layers=[3, 6, 40, 3],
mlp_ratios=(4, 4, 4, 4),
init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
'releases/download/v2/pvt_v2_b5.pth')),
neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
_delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 sample per GPU)
auto_scale_lr = dict(base_batch_size=8)
|
from typing import Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
    # Please refer to the documentation of with_structured_output
    # to see an up-to-date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[list[type[BaseModel]], type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
]
)
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
return chain
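if __name__ == "__main__":
    # Editor's sketch (illustrative): requires a tool-calling chat model and an
    # OPENAI_API_KEY in the environment; the model name and schema are assumptions.
    from langchain_openai import ChatOpenAI
    from pydantic import Field

    class Person(BaseModel):
        name: str = Field(description="The person's name")
        age: int = Field(description="The person's age")

    chain = create_extraction_chain_pydantic(Person, ChatOpenAI(model="gpt-4o-mini"))
    print(chain.invoke({"input": "Alice is 30 and Bob is 25."}))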
|
from typing import List, Type, Union
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers.openai_tools import PydanticToolsParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable
from langchain_core.utils.function_calling import convert_pydantic_to_openai_function
from pydantic import BaseModel
_EXTRACTION_TEMPLATE = """Extract and save the relevant entities mentioned \
in the following passage together with their properties.
If a property is not present and is not required in the function parameters, do not include it in the output.""" # noqa: E501
@deprecated(
since="0.1.14",
message=(
"LangChain has introduced a method called `with_structured_output` that"
"is available on ChatModels capable of tool calling."
"You can read more about the method here: "
"<https://python.langchain.com/docs/modules/model_io/chat/structured_output/>. "
"Please follow our extraction use case documentation for more guidelines"
"on how to do information extraction with LLMs."
"<https://python.langchain.com/docs/use_cases/extraction/>. "
"with_structured_output does not currently support a list of pydantic schemas. "
"If this is a blocker or if you notice other issues, please provide "
"feedback here:"
"<https://github.com/langchain-ai/langchain/discussions/18154>"
),
removal="1.0",
alternative=(
"""
from pydantic import BaseModel, Field
from langchain_anthropic import ChatAnthropic
class Joke(BaseModel):
setup: str = Field(description="The setup of the joke")
punchline: str = Field(description="The punchline to the joke")
# Or any other chat model that supports tools.
    # Please refer to the documentation of with_structured_output
    # to see an up-to-date list of which models support
# with_structured_output.
model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
structured_llm = model.with_structured_output(Joke)
structured_llm.invoke("Tell me a joke about cats.
Make sure to call the Joke function.")
"""
),
)
def create_extraction_chain_pydantic(
pydantic_schemas: Union[List[Type[BaseModel]], Type[BaseModel]],
llm: BaseLanguageModel,
system_message: str = _EXTRACTION_TEMPLATE,
) -> Runnable:
"""Creates a chain that extracts information from a passage.
Args:
pydantic_schemas: The schema of the entities to extract.
llm: The language model to use.
system_message: The system message to use for extraction.
Returns:
A runnable that extracts information from a passage.
"""
if not isinstance(pydantic_schemas, list):
pydantic_schemas = [pydantic_schemas]
prompt = ChatPromptTemplate.from_messages(
[
("system", system_message),
("user", "{input}"),
]
)
functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
tools = [{"type": "function", "function": d} for d in functions]
model = llm.bind(tools=tools)
chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
return chain
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
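if __name__ == "__main__":
    # Editor's sketch (an assumption based on the class names, not part of the
    # original module): write a small dataset to SQLite and read it back.
    import sqlite3

    con = sqlite3.connect("example.db")
    ds = Dataset.from_dict({"text": ["a", "b"]})
    SqlDatasetWriter(ds, "my_table", con).write()
    print(SqlDatasetReader("SELECT * FROM my_table", con).read())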
|
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
# try_from_hf_gcs=try_from_hf_gcs,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
written = self._write(**self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql,
[(offset, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
disable=not logging.is_progress_bar_enabled(),
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth' # noqa
model = dict(
backbone=dict(
init_cfg=dict(
type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optim_wrapper = dict(
optimizer=dict(_delete_=True, type='AdamW', lr=0.0002, weight_decay=0.05),
paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..annoy_searcher import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(['metric', 'is_distance'],
[('angular', True), ('euclidean', True), ('manhattan', True), ('hamming', True),
('dot', True), ('angular', False), ('euclidean', False), ('manhattan', False),
('hamming', False), ('dot', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher', }
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
import numpy as np
import pytest
from jina import Document, DocumentArray
from jina.executors.metas import get_default_metas
from jina_commons.indexers.dump import import_vectors
from ..annoy import AnnoySearcher
# fix the seed here
np.random.seed(500)
docs = DocumentArray([Document(embedding=np.random.random(10)) for i in range(10)])
search_doc = DocumentArray([Document(embedding=np.random.random(10))])
DUMP_PATH = 'tests/dump1'
TOP_K = 5
@pytest.fixture(scope='function', autouse=True)
def metas(tmpdir):
os.environ['TEST_WORKSPACE'] = str(tmpdir)
metas = get_default_metas()
metas['workspace'] = os.environ['TEST_WORKSPACE']
yield metas
del os.environ['TEST_WORKSPACE']
def test_simple_annoy():
from annoy import AnnoyIndex
_index = AnnoyIndex(5, 'angular')
for j in range(3):
_index.add_item(j, np.random.random((5,)))
_index.build(4)
idx1, _ = _index.get_nns_by_vector(
np.random.random((5,)), 3, include_distances=True
)
assert len(idx1) == 3
@pytest.mark.parametrize(['metric', 'is_distance'],
[('angular', True), ('euclidean', True), ('manhattan', True), ('hamming', True),
('dot', True), ('angular', False), ('euclidean', False), ('manhattan', False),
('hamming', False), ('dot', False)])
def test_metric(tmpdir, metric, is_distance):
metas = {'workspace': str(tmpdir), 'name': 'searcher', }
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, metric=metric, is_distance=is_distance, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == TOP_K
for i in range(len(docs[0].matches) - 1):
if not is_distance:
assert docs[0].matches[i].scores[metric].value >= docs[0].matches[i + 1].scores[metric].value
else:
assert docs[0].matches[i].scores[metric].value <= docs[0].matches[i + 1].scores[metric].value
def test_query_vector(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(dump_path=DUMP_PATH, default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
ids, vecs = import_vectors(DUMP_PATH, str(0))
ids = np.array(list(ids))
vecs = np.array(list(vecs))
assert len(docs) == 1
assert len(docs[0].matches) == TOP_K
assert docs[0].matches[0].id in ids
assert len(docs[0].matches[0].embedding) == 7
assert docs[0].matches[0].embedding in vecs
da = DocumentArray([Document(id=0), Document(id=1), Document(id=2)])
indexer.fill_embedding(da)
for i, doc in enumerate(da):
assert list(doc.embedding)
def test_query_vector_empty(tmpdir):
metas = {'workspace': str(tmpdir), 'name': 'searcher'}
runtime_args = {'pea_id': 0, 'replica_id': 0}
indexer = AnnoySearcher(default_top_k=TOP_K, metas=metas, runtime_args=runtime_args)
docs = DocumentArray([Document(embedding=np.random.random(7))])
indexer.search(docs, {})
assert len(docs[0].matches) == 0
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import VideoDoc
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = VideoDoc(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(VideoDoc, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(VideoDoc, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(VideoDoc, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDocument):
video: VideoDoc
video2: VideoDoc
video3: VideoDoc
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import numpy as np
import pytest
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.documents import Video
from docarray.typing import AudioNdArray, NdArray, VideoNdArray
from docarray.utils.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
import tensorflow._api.v2.experimental.numpy as tnp
LOCAL_VIDEO_FILE = str(TOYDATA_DIR / 'mov_bbb.mp4')
REMOTE_VIDEO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/mov_bbb.mp4?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize('file_url', [LOCAL_VIDEO_FILE, REMOTE_VIDEO_FILE])
def test_video(file_url):
vid = Video(url=file_url)
vid.tensor, vid.audio.tensor, vid.key_frame_indices = vid.url.load()
assert isinstance(vid.tensor, VideoNdArray)
assert isinstance(vid.audio.tensor, AudioNdArray)
assert isinstance(vid.key_frame_indices, NdArray)
def test_video_np():
video = parse_obj_as(Video, np.zeros((10, 10, 3)))
assert (video.tensor == np.zeros((10, 10, 3))).all()
def test_video_torch():
video = parse_obj_as(Video, torch.zeros(10, 10, 3))
assert (video.tensor == torch.zeros(10, 10, 3)).all()
@pytest.mark.tensorflow
def test_video_tensorflow():
video = parse_obj_as(Video, tf.zeros((10, 10, 3)))
assert tnp.allclose(video.tensor.tensor, tf.zeros((10, 10, 3)))
def test_video_shortcut_doc():
class MyDoc(BaseDocument):
video: Video
video2: Video
video3: Video
doc = MyDoc(
video='http://myurl.mp4',
video2=np.zeros((10, 10, 3)),
video3=torch.zeros(10, 10, 3),
)
assert doc.video.url == 'http://myurl.mp4'
assert (doc.video2.tensor == np.zeros((10, 10, 3))).all()
assert (doc.video3.tensor == torch.zeros(10, 10, 3)).all()
|
import os
import pytest
import respx
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
@pytest.fixture()
def mock_local_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://test_url/v1/models").respond(
json={"data": [{"id": "model1"}]}
)
def get_api_key(instance: Any) -> str:
return instance._api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
assert client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
|
import os
import pytest
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank as Interface
from llama_index.core.schema import NodeWithScore, Document
from typing import Any
from requests_mock import Mocker
@pytest.fixture()
def mock_local_models(requests_mock: Mocker) -> None:
requests_mock.get(
"https://test_url/v1/models",
json={
"data": [
{"id": "model1"},
]
},
)
def get_api_key(instance: Any) -> str:
return instance._api_key
def test_create_default_url_without_api_key(masked_env_var: str) -> None:
with pytest.raises(ValueError) as e:
Interface()
assert "API key is required" in str(e.value)
@pytest.mark.usefixtures("mock_local_models")
def test_create_unknown_url_without_api_key(masked_env_var: str) -> None:
Interface(base_url="https://test_url/v1")
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_create_with_api_key(param: str, masked_env_var: str) -> None:
instance = Interface(**{param: "just testing no failure"})
assert get_api_key(instance) == "just testing no failure"
def test_api_key_priority(masked_env_var: str) -> None:
try:
os.environ["NVIDIA_API_KEY"] = "ENV"
assert get_api_key(Interface()) == "ENV"
assert get_api_key(Interface(nvidia_api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="PARAM")) == "PARAM"
assert get_api_key(Interface(api_key="LOW", nvidia_api_key="HIGH")) == "HIGH"
finally:
# we must clean up environ or it may impact other tests
del os.environ["NVIDIA_API_KEY"]
@pytest.mark.integration()
def test_bogus_api_key_error(masked_env_var: str) -> None:
client = Interface(nvidia_api_key="BOGUS")
with pytest.raises(Exception) as exc_info:
client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
message = str(exc_info.value)
assert "401" in message
@pytest.mark.integration()
@pytest.mark.parametrize("param", ["nvidia_api_key", "api_key"])
def test_api_key(model: str, mode: dict, param: str, masked_env_var: str) -> None:
client = Interface(model=model, **{**mode, **{param: masked_env_var}})
assert client.postprocess_nodes(
[NodeWithScore(node=Document(text="Hello, world!"))],
query_str="Hello, world!",
)
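# A minimal usage sketch, assuming a valid key in NVIDIA_API_KEY or passed
# explicitly (the "nvapi-..." value below is a placeholder): the reranker
# resolves nvidia_api_key over api_key over the environment, as
# test_api_key_priority asserts above.
if __name__ == "__main__":
    reranker = Interface(nvidia_api_key="nvapi-...")  # placeholder key
    ranked = reranker.postprocess_nodes(
        [NodeWithScore(node=Document(text="Hello, world!"))],
        query_str="Hello, world!",
    )
    print([n.score for n in ranked])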
|
from llama_index.core.constants import DATA_KEY, TYPE_KEY
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
IndexNode,
Node,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
def doc_to_json(doc: BaseNode) -> dict:
return {
DATA_KEY: doc.to_dict(),
TYPE_KEY: doc.get_type(),
}
def json_to_doc(doc_dict: dict) -> BaseNode:
doc_type = doc_dict[TYPE_KEY]
data_dict = doc_dict[DATA_KEY]
doc: BaseNode
if "extra_info" in data_dict:
return legacy_json_to_doc(doc_dict)
else:
if doc_type == Document.get_type():
if data_dict["class_name"] == ImageDocument.class_name():
doc = ImageDocument.from_dict(data_dict)
else:
doc = Document.from_dict(data_dict)
elif doc_type == Node.get_type():
doc = Node.from_dict(data_dict)
elif doc_type == TextNode.get_type():
doc = TextNode.from_dict(data_dict)
elif doc_type == ImageNode.get_type():
doc = ImageNode.from_dict(data_dict)
elif doc_type == IndexNode.get_type():
doc = IndexNode.from_dict(data_dict)
elif doc_type == Node.get_type():
doc = Node.from_dict(data_dict)
else:
raise ValueError(f"Unknown doc type: {doc_type}")
return doc
def legacy_json_to_doc(doc_dict: dict) -> BaseNode:
"""Todo: Deprecated legacy support for old node versions."""
doc_type = doc_dict[TYPE_KEY]
data_dict = doc_dict[DATA_KEY]
doc: BaseNode
text = data_dict.get("text", "")
metadata = data_dict.get("extra_info", {}) or {}
id_ = data_dict.get("doc_id", None)
relationships = data_dict.get("relationships", {})
relationships = {
NodeRelationship(k): RelatedNodeInfo(node_id=str(v))
for k, v in relationships.items()
}
if doc_type == Document.get_type():
doc = Document(
text=text, metadata=metadata, id=id_, relationships=relationships
)
elif doc_type == TextNode.get_type():
doc = TextNode(
text=text, metadata=metadata, id=id_, relationships=relationships
)
elif doc_type == ImageNode.get_type():
image = data_dict.get("image", None)
doc = ImageNode(
text=text,
metadata=metadata,
id=id_,
relationships=relationships,
image=image,
)
elif doc_type == IndexNode.get_type():
index_id = data_dict.get("index_id", None)
doc = IndexNode(
text=text,
metadata=metadata,
id=id_,
relationships=relationships,
index_id=index_id,
)
else:
raise ValueError(f"Unknown doc type: {doc_type}")
return doc
|
from llama_index.core.constants import DATA_KEY, TYPE_KEY
from llama_index.core.schema import (
BaseNode,
Document,
ImageDocument,
ImageNode,
IndexNode,
Node,
NodeRelationship,
RelatedNodeInfo,
TextNode,
)
def doc_to_json(doc: BaseNode) -> dict:
return {
DATA_KEY: doc.to_dict(),
TYPE_KEY: doc.get_type(),
}
def json_to_doc(doc_dict: dict) -> BaseNode:
doc_type = doc_dict[TYPE_KEY]
data_dict = doc_dict[DATA_KEY]
doc: BaseNode
if "extra_info" in data_dict:
return legacy_json_to_doc(doc_dict)
else:
if doc_type == Document.get_type():
if data_dict["class_name"] == ImageDocument.class_name():
doc = ImageDocument.from_dict(data_dict)
else:
doc = Document.from_dict(data_dict)
elif doc_type == TextNode.get_type():
doc = TextNode.from_dict(data_dict)
elif doc_type == ImageNode.get_type():
doc = ImageNode.from_dict(data_dict)
elif doc_type == IndexNode.get_type():
doc = IndexNode.from_dict(data_dict)
elif doc_type == Node.get_type():
doc = Node.from_dict(data_dict)
else:
raise ValueError(f"Unknown doc type: {doc_type}")
return doc
def legacy_json_to_doc(doc_dict: dict) -> BaseNode:
"""Todo: Deprecated legacy support for old node versions."""
doc_type = doc_dict[TYPE_KEY]
data_dict = doc_dict[DATA_KEY]
doc: BaseNode
text = data_dict.get("text", "")
metadata = data_dict.get("extra_info", {}) or {}
id_ = data_dict.get("doc_id", None)
relationships = data_dict.get("relationships", {})
relationships = {
NodeRelationship(k): RelatedNodeInfo(node_id=str(v))
for k, v in relationships.items()
}
if doc_type == Document.get_type():
doc = Document(
text=text, metadata=metadata, id=id_, relationships=relationships
)
elif doc_type == TextNode.get_type():
doc = TextNode(
text=text, metadata=metadata, id=id_, relationships=relationships
)
elif doc_type == ImageNode.get_type():
image = data_dict.get("image", None)
doc = ImageNode(
text=text,
metadata=metadata,
id=id_,
relationships=relationships,
image=image,
)
elif doc_type == IndexNode.get_type():
index_id = data_dict.get("index_id", None)
doc = IndexNode(
text=text,
metadata=metadata,
id=id_,
relationships=relationships,
index_id=index_id,
)
else:
raise ValueError(f"Unknown doc type: {doc_type}")
return doc
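# A minimal round-trip sketch: doc_to_json stores the node payload under
# DATA_KEY and its type under TYPE_KEY, and json_to_doc dispatches on that
# type (falling back to legacy_json_to_doc when the payload still carries the
# old "extra_info" key).
if __name__ == "__main__":
    node = TextNode(text="hello", metadata={"source": "example"})
    restored = json_to_doc(doc_to_json(node))
    assert isinstance(restored, TextNode)
    assert restored.text == "hello"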
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}))
assert index._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={}, default_ef=10)
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={})
assert index._runtime_config.default_column_config == {}
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from dataclasses import dataclass, field
from typing import Any, Dict, Type
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index.abstract import BaseDocIndex
from docarray.typing import NdArray
pytestmark = pytest.mark.index
class SimpleDoc(BaseDoc):
tens: NdArray[10] = Field(dim=1000)
class FakeQueryBuilder:
...
@dataclass
class DBConfig(BaseDocIndex.DBConfig):
work_dir: str = '.'
other: int = 5
default_column_config: Dict[Type, Dict[str, Any]] = field(
default_factory=lambda: {
str: {
'dim': 128,
'space': 'l2',
},
}
)
default_ef: int = 50
@dataclass
class RuntimeConfig(BaseDocIndex.RuntimeConfig):
pass
def _identity(*x, **y):
return x, y
class DummyDocIndex(BaseDocIndex):
DBConfig = DBConfig
RuntimeConfig = RuntimeConfig
def python_type_to_db_type(self, x):
return str
_index = _identity
num_docs = _identity
_del_items = _identity
_get_items = _identity
execute_query = _identity
_find = _identity
_find_batched = _identity
_filter = _identity
_filter_batched = _identity
_text_search = _identity
_text_search_batched = _identity
def test_defaults():
index = DummyDocIndex[SimpleDoc]()
assert index._db_config.other == 5
assert index._db_config.work_dir == '.'
assert index._runtime_config.default_column_config[str] == {
'dim': 128,
'space': 'l2',
}
def test_set_by_class():
# change all settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi', other=10))
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}, default_ef=10))
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](DBConfig(work_dir='hi'))
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(RuntimeConfig(default_column_config={}))
assert index._runtime_config.default_column_config == {}
def test_set_by_kwargs():
# change all settings
index = DummyDocIndex[SimpleDoc](work_dir='hi', other=10)
assert index._db_config.other == 10
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={}, default_ef=10)
assert index._runtime_config.default_column_config == {}
# change only some settings
index = DummyDocIndex[SimpleDoc](work_dir='hi')
assert index._db_config.other == 5
assert index._db_config.work_dir == 'hi'
index.configure(default_column_config={})
assert index._runtime_config.default_column_config == {}
def test_default_column_config():
index = DummyDocIndex[SimpleDoc]()
assert index._runtime_config.default_column_config == {
str: {
'dim': 128,
'space': 'l2',
},
}
|
from abc import ABC
from contextlib import ExitStack
from rich.table import Table
from jina.helper import CatchAllCleanupContextManager, get_internal_ip, get_public_ip
class BaseOrchestrator(ExitStack, ABC):
"""Base orchestrator class"""
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
def _init_table(self):
table = Table(
title=None, box=None, highlight=True, show_header=False, min_width=40
)
table.add_column('', justify='left')
table.add_column('', justify='right')
table.add_column('', justify='right')
return table
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
if getattr(self, '_internal_ip', None):
return self._internal_ip
else:
self._internal_ip = get_internal_ip()
return self._internal_ip
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
if getattr(self, '_public_ip', None):
return self._public_ip
else:
self._public_ip = get_public_ip()
return self._public_ip
|
from abc import ABC
from contextlib import ExitStack
from rich.table import Table
from jina.helper import CatchAllCleanupContextManager, get_internal_ip, get_public_ip
class BaseOrchestrator(ExitStack, ABC):
"""Base orchestrator class"""
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
def _init_table(self):
table = Table(
title=None, box=None, highlight=True, show_header=False, min_width=40
)
table.add_column('', justify='left')
table.add_column('', justify='right')
table.add_column('', justify='right')
return table
@property
def address_private(self) -> str:
"""Return the private IP address of the gateway for connecting from other machine in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
"""Return the public IP address of the gateway for connecting from other machine in the public network
.. # noqa: DAR201"""
return get_public_ip()
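# A minimal sketch of a concrete orchestrator (hypothetical; real subclasses
# such as Flow implement start() to bring up their runtimes). BaseOrchestrator
# only contributes the context-manager plumbing and the IP helpers.
import threading

class _DummyOrchestrator(BaseOrchestrator):
    def __init__(self):
        super().__init__()
        self._stop_event = threading.Event()  # picked up by __exit__

    def start(self):
        # bring up servers/processes here; returning self lets
        # ``with _DummyOrchestrator() as orch:`` bind the instance
        return self

# with _DummyOrchestrator() as orch:
#     print(orch.address_private, orch.address_public)  # triggers network lookups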
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from pathlib import Path
from jina import Document, DocumentArray, Executor
from ...sentencizer import Sentencizer
def test_config():
ex = Executor.load_config(str(Path(__file__).parents[2] / 'config.yml'))
assert ex.min_sent_len == 1
def test_executor():
ex = Sentencizer()
input = DocumentArray([Document(text='Hello. World.')])
ex.segment(input, {})
assert input[0].chunks[0].text == 'Hello.'
assert input[0].chunks[1].text == 'World.'
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from jina import Document, DocumentArray
from ...sentencizer import Sentencizer
def test_executor():
ex = Sentencizer.load_config('../../config.yml')
input = DocumentArray([Document(text='Hello. World.')])
ex.segment(input, {})
assert input[0].chunks[0].text == 'Hello.'
assert input[0].chunks[1].text == 'World.'
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CrossEncoderCorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CrossEncoderCorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
|
"""
This example trains a CrossEncoder for the STSbenchmark task. A CrossEncoder takes a sentence pair
as input and outputs a label. Here, it outputs a continuous label 0...1 to indicate the similarity of the input pair.
It does NOT produce a sentence embedding and does NOT work for individual sentences.
Usage:
python training_stsbenchmark.py
"""
import csv
import gzip
import logging
import math
import os
from datetime import datetime
from torch.utils.data import DataLoader
from sentence_transformers import InputExample, LoggingHandler, util
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CECorrelationEvaluator
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
logger = logging.getLogger(__name__)
#### /print debug information to stdout
# Check if dataset exists. If not, download and extract it
sts_dataset_path = "datasets/stsbenchmark.tsv.gz"
if not os.path.exists(sts_dataset_path):
util.http_get("https://sbert.net/datasets/stsbenchmark.tsv.gz", sts_dataset_path)
# Define our Cross-Encoder
train_batch_size = 16
num_epochs = 4
model_save_path = "output/training_stsbenchmark-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# We use distilroberta-base as base model and set num_labels=1, which predicts a continuous score between 0 and 1
model = CrossEncoder("distilroberta-base", num_labels=1)
# Read STSb dataset
logger.info("Read STSbenchmark train dataset")
train_samples = []
dev_samples = []
test_samples = []
with gzip.open(sts_dataset_path, "rt", encoding="utf8") as fIn:
reader = csv.DictReader(fIn, delimiter="\t", quoting=csv.QUOTE_NONE)
for row in reader:
score = float(row["score"]) / 5.0 # Normalize score to range 0 ... 1
if row["split"] == "dev":
dev_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
elif row["split"] == "test":
test_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
else:
# As we want to get symmetric scores, i.e. CrossEncoder(A,B) = CrossEncoder(B,A), we pass both combinations to the train set
train_samples.append(InputExample(texts=[row["sentence1"], row["sentence2"]], label=score))
train_samples.append(InputExample(texts=[row["sentence2"], row["sentence1"]], label=score))
# We wrap train_samples (which is a List[InputExample]) into a pytorch DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
# We add an evaluator, which evaluates the performance during training
evaluator = CECorrelationEvaluator.from_input_examples(dev_samples, name="sts-dev")
# Configure the training
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path,
)
##### Load model and eval on test set
model = CrossEncoder(model_save_path)
evaluator = CECorrelationEvaluator.from_input_examples(test_samples, name="sts-test")
evaluator(model)
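# As a final sanity check, the fine-tuned cross-encoder can score raw sentence
# pairs directly; the pair below is an arbitrary example, and the score lands
# in the same 0...1 range as the training labels.
scores = model.predict([["A man is eating food.", "A man is eating a piece of bread."]])
logger.info(f"Example similarity score: {scores[0]:.3f}")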
|
import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
filter_dict,
get_full_version,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_protocol_and_compress_from_file_path(file_path, protocol, compress):
file_path_extended = file_path
if protocol:
file_path_extended += '.' + protocol
if compress:
file_path_extended += '.' + compress
_protocol, _compress = protocol_and_compress_from_file_path(file_path_extended)
assert _protocol in {'protobuf', 'protobuf-array', 'pickle', 'pickle-array', None}
assert _compress in {'lz4', 'bz2', 'lzma', 'zlib', 'gzip', None}
assert protocol == _protocol
assert compress == _compress
@pytest.mark.parametrize('file_path', ['doc_array', './some_folder/doc_array'])
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip'])
def test_add_protocol_and_compress_to_file_path(file_path, compress, protocol):
file_path_extended = add_protocol_and_compress_to_file_path(
file_path, compress, protocol
)
file_path_suffixes = [
e.replace('.', '') for e in pathlib.Path(file_path_extended).suffixes
]
if compress:
assert compress in file_path_suffixes
if protocol:
assert protocol in file_path_suffixes
def test_filter_dict():
conf_dict = {'x': 0, 'y': 1, 'z': None, 'k': ''}
assert list(filter_dict(conf_dict).keys()) == ['x', 'y', 'k']
def test_ci_vendor():
if 'GITHUB_WORKFLOW' in os.environ:
assert get_full_version()['ci-vendor'] == 'GITHUB_ACTIONS'
|
import os
import pathlib
import pytest
from docarray.helper import (
protocol_and_compress_from_file_path,
add_protocol_and_compress_to_file_path,
get_full_version,
)
@pytest.mark.parametrize(
'file_path', ['doc_array', '../docarray', './a_folder/docarray']
)
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
def test_protocol_and_compress_from_file_path(file_path, protocol, compress):
file_path_extended = file_path
if protocol:
file_path_extended += '.' + protocol
if compress:
file_path_extended += '.' + compress
_protocol, _compress = protocol_and_compress_from_file_path(file_path_extended)
assert _protocol in {'protobuf', 'protobuf-array', 'pickle', 'pickle-array', None}
assert _compress in {'lz4', 'bz2', 'lzma', 'zlib', 'gzip', None}
assert protocol == _protocol
assert compress == _compress
@pytest.mark.parametrize('file_path', ['doc_array', './some_folder/doc_array'])
@pytest.mark.parametrize(
'protocol', ['protobuf', 'protobuf-array', 'pickle', 'pickle-array']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip'])
def test_add_protocol_and_compress_to_file_path(file_path, compress, protocol):
file_path_extended = add_protocol_and_compress_to_file_path(
file_path, compress, protocol
)
file_path_suffixes = [
e.replace('.', '') for e in pathlib.Path(file_path_extended).suffixes
]
if compress:
assert compress in file_path_suffixes
if protocol:
assert protocol in file_path_suffixes
def test_ci_vendor():
if 'GITHUB_WORKFLOW' in os.environ:
assert get_full_version()['ci-vendor'] == 'GITHUB_ACTIONS'
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.device import is_npu_available
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
        List[str]: Names of the registered optimizers.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
    If ``constructor`` is set in the ``cfg``, this method will build an
    optimizer wrapper constructor and use it to build the optimizer
    wrapper. If ``constructor`` is not set, the
    ``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
    # Since the current generation of NPUs (Ascend 910) only supports
    # mixed precision training, we turn mixed precision on by default
    # on the NPU so that training runs as expected
if is_npu_available():
optim_wrapper_cfg['type'] = 'AmpOptimWrapper'
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import inspect
from typing import List, Union
import torch
import torch.nn as nn
from mmengine.config import Config, ConfigDict
from mmengine.registry import OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS
from .optimizer_wrapper import OptimWrapper
def register_torch_optimizers() -> List[str]:
"""Register optimizers in ``torch.optim`` to the ``OPTIMIZERS`` registry.
Returns:
        List[str]: Names of the registered optimizers.
"""
torch_optimizers = []
for module_name in dir(torch.optim):
if module_name.startswith('__'):
continue
_optim = getattr(torch.optim, module_name)
if inspect.isclass(_optim) and issubclass(_optim,
torch.optim.Optimizer):
OPTIMIZERS.register_module(module=_optim)
torch_optimizers.append(module_name)
return torch_optimizers
TORCH_OPTIMIZERS = register_torch_optimizers()
def build_optim_wrapper(model: nn.Module,
cfg: Union[dict, Config, ConfigDict]) -> OptimWrapper:
"""Build function of OptimWrapper.
    If ``constructor`` is set in the ``cfg``, this method will build an
    optimizer wrapper constructor and use it to build the optimizer
    wrapper. If ``constructor`` is not set, the
    ``DefaultOptimWrapperConstructor`` will be used by default.
Args:
model (nn.Module): Model to be optimized.
cfg (dict): Config of optimizer wrapper, optimizer constructor and
optimizer.
Returns:
OptimWrapper: The built optimizer wrapper.
"""
optim_wrapper_cfg = copy.deepcopy(cfg)
constructor_type = optim_wrapper_cfg.pop('constructor',
'DefaultOptimWrapperConstructor')
paramwise_cfg = optim_wrapper_cfg.pop('paramwise_cfg', None)
optim_wrapper_constructor = OPTIM_WRAPPER_CONSTRUCTORS.build(
dict(
type=constructor_type,
optim_wrapper_cfg=optim_wrapper_cfg,
paramwise_cfg=paramwise_cfg))
optim_wrapper = optim_wrapper_constructor(model)
return optim_wrapper
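# A minimal sketch of the cfg shape the docstring describes: the nested
# ``optimizer`` dict is resolved against the OPTIMIZERS registry populated
# above from ``torch.optim``, and the outer keys select and configure the
# wrapper.
if __name__ == "__main__":
    model = nn.Linear(2, 2)
    cfg = dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01))
    optim_wrapper = build_optim_wrapper(model, cfg)
    print(type(optim_wrapper).__name__)  # OptimWrapper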
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata['image_type'] = 'ndarray'
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata['image_type'] = 'PIL'
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def uri_setter(value) -> 'Document':
from docarray import Document
return Document(uri=value)
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'audio_type': 'ndarray'})
else:
return Document(
uri=value, modality='audio', _metadata={'audio_type': 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'video_type': 'ndarray'})
else:
return Document(
uri=value, modality='video', _metadata={'video_type': 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'mesh_type': 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={'mesh_type': 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={'blob_type': 'bytes'})
else:
return Document(uri=value, _metadata={'blob_type': 'uri'}).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
|
from typing import TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
from docarray import Document
def image_setter(value) -> 'Document':
from docarray import Document
doc = Document(modality='image')
if isinstance(value, str):
doc.uri = value
doc._metadata['image_type'] = 'uri'
doc.load_uri_to_image_tensor()
elif isinstance(value, np.ndarray):
doc.tensor = value
doc._metadata['image_type'] = 'ndarray'
else:
from PIL.Image import Image
if isinstance(value, Image):
doc.tensor = np.array(value)
doc._metadata['image_type'] = 'PIL'
return doc
def text_setter(value) -> 'Document':
from docarray import Document
return Document(text=value, modality='text')
def audio_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'audio_type': 'ndarray'})
else:
return Document(
uri=value, modality='audio', _metadata={'audio_type': 'uri'}
).load_uri_to_audio_tensor()
def video_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'video_type': 'ndarray'})
else:
return Document(
uri=value, modality='video', _metadata={'video_type': 'uri'}
).load_uri_to_video_tensor()
def mesh_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, np.ndarray):
return Document(tensor=value, _metadata={'mesh_type': 'ndarray'})
else:
return Document(
uri=value, modality='mesh', _metadata={'mesh_type': 'uri'}
).load_uri_to_point_cloud_tensor(1000)
def blob_setter(value) -> 'Document':
from docarray import Document
if isinstance(value, bytes):
return Document(blob=value, _metadata={'blob_type': 'bytes'})
else:
return Document(uri=value, _metadata={'blob_type': 'uri'}).load_uri_to_blob()
def json_setter(value) -> 'Document':
from docarray import Document
return Document(modality='json', tags=value)
def tabular_setter(value) -> 'Document':
from docarray import Document, DocumentArray
return Document(uri=value, chunks=DocumentArray.from_csv(value), modality='tabular')
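# Each setter wraps a raw value into a Document for one modality and records
# how the value arrived in _metadata; a small sketch with the text and blob
# setters:
if __name__ == "__main__":
    doc = text_setter('hello world')
    print(doc.modality, doc.text)  # text hello world
    blob_doc = blob_setter(b'\x00\x01')
    print(blob_doc._metadata['blob_type'])  # bytes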
|
"""Reader that pulls in a BoardDocs site."""
import json
from typing import Any, List, Optional
import html2text
import requests
from bs4 import BeautifulSoup
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class BoardDocsReader(BaseReader):
"""
BoardDocs doc reader.
Read public agendas included on a BoardDocs site.
Args:
site (str): The BoardDocs site you'd like to index, e.g. "ca/redwood"
committee_id (str): The committee on the site you want to index
"""
def __init__(
self,
site: str,
committee_id: str,
) -> None:
"""Initialize with parameters."""
self.site = site
self.committee_id = committee_id
self.base_url = "https://go.boarddocs.com/" + site + "/Board.nsf"
# set up the headers required for the server to answer
self.headers = {
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"sec-ch-ua": (
'"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"'
),
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-requested-with": "XMLHttpRequest",
}
super().__init__()
def get_meeting_list(self) -> List[dict]:
"""
Returns a list of meetings for the committee.
Args:
None
Returns:
List[dict]: A list of meetings, each with a meetingID, date, and unid
"""
meeting_list_url = self.base_url + "/BD-GetMeetingsList?open"
data = "current_committee_id=" + self.committee_id
response = requests.post(meeting_list_url, headers=self.headers, data=data)
meetingsData = json.loads(response.text)
return [
{
"meetingID": meeting.get("unique", None),
"date": meeting.get("numberdate", None),
"unid": meeting.get("unid", None),
}
for meeting in meetingsData
]
def process_meeting(
self, meeting_id: str, index_pdfs: bool = True
) -> List[Document]:
"""
Returns documents from the given meeting.
"""
agenda_url = self.base_url + "/PRINT-AgendaDetailed"
# set the meetingID & committee
data = "id=" + meeting_id + "&" + "current_committee_id=" + self.committee_id
# POST the request!
response = requests.post(agenda_url, headers=self.headers, data=data)
# parse the returned HTML
soup = BeautifulSoup(response.content, "html.parser")
agenda_date = soup.find("div", {"class": "print-meeting-date"}).string
agenda_title = soup.find("div", {"class": "print-meeting-name"}).string
[fd.a.get("href") for fd in soup.find_all("div", {"class": "public-file"})]
agenda_data = html2text.html2text(response.text)
# TODO: index the linked PDFs in agenda_files!
docs = []
agenda_doc = Document(
text=agenda_data,
doc_id=meeting_id,
extra_info={
"committee": self.committee_id,
"title": agenda_title,
"date": agenda_date,
"url": agenda_url,
},
)
docs.append(agenda_doc)
return docs
def load_data(
self, meeting_ids: Optional[List[str]] = None, **load_kwargs: Any
) -> List[Document]:
"""
Load all meetings of the committee.
Args:
meeting_ids (List[str]): A list of meeting IDs to load. If None, load all meetings.
"""
# if a list of meetings wasn't provided, enumerate them all
if not meeting_ids:
meeting_ids = [
meeting.get("meetingID") for meeting in self.get_meeting_list()
]
# process all relevant meetings & return the documents
docs = []
for meeting_id in meeting_ids:
docs.extend(self.process_meeting(meeting_id))
return docs
|
"""Reader that pulls in a BoardDocs site."""
import json
from typing import Any, List, Optional
import html2text
import requests
from bs4 import BeautifulSoup
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class BoardDocsReader(BaseReader):
"""BoardDocs doc reader.
Read public agendas included on a BoardDocs site.
Args:
site (str): The BoardDocs site you'd like to index, e.g. "ca/redwood"
committee_id (str): The committee on the site you want to index
"""
def __init__(
self,
site: str,
committee_id: str,
) -> None:
"""Initialize with parameters."""
self.site = site
self.committee_id = committee_id
self.base_url = "https://go.boarddocs.com/" + site + "/Board.nsf"
# set up the headers required for the server to answer
self.headers = {
"accept": "application/json, text/javascript, */*; q=0.01",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"sec-ch-ua": (
'"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"'
),
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"macOS"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"x-requested-with": "XMLHttpRequest",
}
super().__init__()
def get_meeting_list(self) -> List[dict]:
"""
Returns a list of meetings for the committee.
Args:
None
Returns:
List[dict]: A list of meetings, each with a meetingID, date, and unid
"""
meeting_list_url = self.base_url + "/BD-GetMeetingsList?open"
data = "current_committee_id=" + self.committee_id
response = requests.post(meeting_list_url, headers=self.headers, data=data)
meetingsData = json.loads(response.text)
return [
{
"meetingID": meeting.get("unique", None),
"date": meeting.get("numberdate", None),
"unid": meeting.get("unid", None),
}
for meeting in meetingsData
]
def process_meeting(
self, meeting_id: str, index_pdfs: bool = True
) -> List[Document]:
"""
Returns documents from the given meeting.
"""
agenda_url = self.base_url + "/PRINT-AgendaDetailed"
# set the meetingID & committee
data = "id=" + meeting_id + "&" + "current_committee_id=" + self.committee_id
# POST the request!
response = requests.post(agenda_url, headers=self.headers, data=data)
# parse the returned HTML
soup = BeautifulSoup(response.content, "html.parser")
agenda_date = soup.find("div", {"class": "print-meeting-date"}).string
agenda_title = soup.find("div", {"class": "print-meeting-name"}).string
[fd.a.get("href") for fd in soup.find_all("div", {"class": "public-file"})]
agenda_data = html2text.html2text(response.text)
# TODO: index the linked PDFs in agenda_files!
docs = []
agenda_doc = Document(
text=agenda_data,
doc_id=meeting_id,
extra_info={
"committee": self.committee_id,
"title": agenda_title,
"date": agenda_date,
"url": agenda_url,
},
)
docs.append(agenda_doc)
return docs
def load_data(
self, meeting_ids: Optional[List[str]] = None, **load_kwargs: Any
) -> List[Document]:
"""Load all meetings of the committee.
Args:
meeting_ids (List[str]): A list of meeting IDs to load. If None, load all meetings.
"""
# if a list of meetings wasn't provided, enumerate them all
if not meeting_ids:
meeting_ids = [
meeting.get("meetingID") for meeting in self.get_meeting_list()
]
# process all relevant meetings & return the documents
docs = []
for meeting_id in meeting_ids:
docs.extend(self.process_meeting(meeting_id))
return docs
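# Typical usage, following the docstring's "ca/redwood" example; the
# committee_id below is a placeholder read off the target BoardDocs site.
if __name__ == "__main__":
    reader = BoardDocsReader(site="ca/redwood", committee_id="<committee-id>")
    meetings = reader.get_meeting_list()
    docs = reader.load_data(meeting_ids=[meetings[0]["meetingID"]])
    print(f"loaded {len(docs)} agenda document(s)")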
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
output = next(iter(ds)).numpy()
self.assertAllClose(expected_output, output)
|
import numpy as np
from absl.testing import parameterized
from tensorflow import data as tf_data
from keras.src import backend
from keras.src import layers
from keras.src import testing
class RandomRotationTest(testing.TestCase):
@parameterized.named_parameters(
("random_rotate_neg4", -0.4),
("random_rotate_neg2", -0.2),
("random_rotate_4", 0.4),
("random_rotate_2", 0.2),
("random_rotate_tuple", (-0.2, 0.4)),
)
def test_random_rotation_shapes(self, factor):
self.run_layer_test(
layers.RandomRotation,
init_kwargs={
"factor": factor,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 4),
supports_masking=False,
run_training_check=False,
)
def test_random_rotation_correctness(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape)
self.assertAllClose(
backend.convert_to_tensor(expected_output), actual_output, atol=1e-5
)
def test_training_false(self):
input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1))
layer = layers.RandomRotation(factor=(0.5, 0.5))
actual_output = layer(input_image, training=False)
self.assertAllClose(actual_output, input_image)
def test_tf_data_compatibility(self):
if backend.config.image_data_format() == "channels_last":
input_shape = (1, 5, 5, 1)
else:
input_shape = (1, 1, 5, 5)
input_image = np.reshape(np.arange(0, 25), input_shape)
layer = layers.RandomRotation(factor=(0.5, 0.5))
ds = tf_data.Dataset.from_tensor_slices(input_image).map(layer)
expected_output = np.asarray(
[
[24, 23, 22, 21, 20],
[19, 18, 17, 16, 15],
[14, 13, 12, 11, 10],
[9, 8, 7, 6, 5],
[4, 3, 2, 1, 0],
]
).reshape(input_shape[1:])
for output in ds.take(1):
output = output.numpy()
self.assertAllClose(expected_output, output)
|
# Copyright (c) OpenMMLab. All rights reserved.
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from ._deepspeed import DeepSpeedOptimWrapper
from .amp_optimizer_wrapper import AmpOptimWrapper
from .apex_optimizer_wrapper import ApexOptimWrapper
from .base import BaseOptimWrapper
from .builder import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIMIZERS,
build_optim_wrapper)
from .default_constructor import DefaultOptimWrapperConstructor
from .optimizer_wrapper import OptimWrapper
from .optimizer_wrapper_dict import OptimWrapperDict
from .zero_optimizer import ZeroRedundancyOptimizer
__all__ = [
'OPTIM_WRAPPER_CONSTRUCTORS', 'OPTIMIZERS',
'DefaultOptimWrapperConstructor', 'build_optim_wrapper', 'OptimWrapper',
'AmpOptimWrapper', 'ApexOptimWrapper', 'OptimWrapperDict',
'ZeroRedundancyOptimizer', 'BaseOptimWrapper', 'DeepSpeedOptimWrapper'
]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from ...image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
@pytest.mark.parametrize('arr_in', [
(np.ones((input_dim, input_dim, 3), dtype=np.float32)),
])
def test_tf_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (target_output_dim,)
def test_tf_batch():
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(Document(blob=np.ones((input_dim, input_dim, 3), dtype=np.float32)) for _ in range(25)),
return_results=True
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (target_output_dim,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [[['r'], 10], [['c'], 0], [['cc'], 0]], ['r']),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [[['r'], 0], [['c'], 10], [['cc'], 0]], ['c']),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [[['r'], 0], [['c'], 0], [['cc'], 10]], ['cc'])
]
)
def test_traversal_path(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: List[str]):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': traversal_paths},
return_results=True
)
for path, count in docs_per_path:
assert len(DocumentArray(results[0].docs).traverse_flat(path).get_attributes('embedding')) == count
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import List
import numpy as np
import pytest
from jina import Flow, Document, DocumentArray
from jinahub.encoder.image_tf_encoder import ImageTFEncoder
input_dim = 336
target_output_dim = 1280
@pytest.mark.parametrize('arr_in', [
(np.ones((input_dim, input_dim, 3), dtype=np.float32)),
])
def test_tf_no_batch(arr_in: np.ndarray):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=DocumentArray([Document(blob=arr_in)]),
return_results=True
)
assert len(results[0].docs) == 1
assert results[0].docs[0].embedding.shape == (target_output_dim,)
def test_tf_batch():
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=(Document(blob=np.ones((input_dim, input_dim, 3), dtype=np.float32)) for _ in range(25)),
return_results=True
)
assert len(results[0].docs.get_attributes('embedding')) == 25
assert results[0].docs.get_attributes('embedding')[0].shape == (target_output_dim,)
@pytest.mark.parametrize(
['docs', 'docs_per_path', 'traversal_paths'],
[
(pytest.lazy_fixture('docs_with_blobs'), [[['r'], 10], [['c'], 0], [['cc'], 0]], ['r']),
(pytest.lazy_fixture('docs_with_chunk_blobs'), [[['r'], 0], [['c'], 10], [['cc'], 0]], ['c']),
(pytest.lazy_fixture('docs_with_chunk_chunk_blobs'), [[['r'], 0], [['c'], 0], [['cc'], 10]], ['cc'])
]
)
def test_traversal_path(docs: DocumentArray, docs_per_path: List[List[str]], traversal_paths: List[str]):
flow = Flow().add(uses=ImageTFEncoder)
with flow:
results = flow.post(
on='/test',
inputs=docs,
parameters={'traversal_paths': traversal_paths},
return_results=True
)
for path, count in docs_per_path:
assert len(DocumentArray(results[0].docs).traverse_flat(path).get_attributes('embedding')) == count
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
.. code-block:: python
from typing import Optional
import torch
from pydantic import parse_obj_as
from docarray import BaseDocument
from docarray.typing import VideoTorchTensor, VideoUrl
class MyVideoDoc(BaseDocument):
title: str
url: Optional[VideoUrl]
video_tensor: Optional[VideoTorchTensor]
doc_1 = MyVideoDoc(
title='my_first_video_doc',
video_tensor=torch.randn(size=(100, 224, 224, 3)),
)
        doc_1.video_tensor.save(file_path='file_1.mp4')
doc_2 = MyVideoDoc(
title='my_second_video_doc',
url='https://www.kozco.com/tech/piano2.wav',
)
doc_2.video_tensor = parse_obj_as(VideoTorchTensor, doc_2.url.load())
        doc_2.video_tensor.save(file_path='file_2.mp4')
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
from typing import TYPE_CHECKING, Any, List, Tuple, Type, TypeVar, Union
import numpy as np
from docarray.typing.proto_register import _register_proto
from docarray.typing.tensor.torch_tensor import TorchTensor, metaTorchAndNode
from docarray.typing.tensor.video.video_tensor_mixin import VideoTensorMixin
T = TypeVar('T', bound='VideoTorchTensor')
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
@_register_proto(proto_type_name='video_torch_tensor')
class VideoTorchTensor(TorchTensor, VideoTensorMixin, metaclass=metaTorchAndNode):
"""
Subclass of TorchTensor, to represent a video tensor.
Adds video-specific features to the tensor.
EXAMPLE USAGE
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, np.ndarray, List[Any], Tuple[Any], Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
tensor = super().validate(value=value, field=field, config=config)
return cls.validate_shape(value=tensor)
|
_base_ = './rpn_r50_fpn_1x_coco.py'
# use caffe img_norm
model = dict(
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
bgr_to_rgb=False,
pad_size_divisor=32),
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
_base_ = './rpn_r50_fpn_1x_coco.py'
# use caffe img_norm
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
preprocess_cfg=preprocess_cfg,
backbone=dict(
norm_cfg=dict(requires_grad=False),
norm_eval=True,
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')))
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document, DocumentArray
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def _update_subindices_append_extend(self, value):
if getattr(self, '_subindices', None):
for selector, da in self._subindices.items():
docs_selector = DocumentArray(value)[selector]
if len(docs_selector) > 0:
da.extend(docs_selector)
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document', **kwargs):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._append(value, **kwargs)
self._update_subindices_append_extend(value)
def _append(self, value, **kwargs):
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
self._extend(values, **kwargs)
self._update_subindices_append_extend(values)
def _extend(self, values, **kwargs):
for value in values:
self._append(value, **kwargs)
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document'], **kwargs) -> None:
for value in values:
self.append(value)
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'data_sample']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
|
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
|