input (string, length 33–5k) | output (string, length 32–5k)
---|---
import argparse
import os
from typing import List
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://jina.ai/serve',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a,
(
_StoreAction,
_StoreTrueAction,
KVAppendAction,
CastToIntAction,
CastHostAction,
),
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
elif isinstance(a, CastHostAction):
ddd['type'] = str
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
|
import argparse
import os
from typing import List
from jina.parsers.helper import CastHostAction
def api_to_dict(show_all_args: bool = False):
"""Convert Jina API to a dict
:param show_all_args: if set, then hidden args are also exported
:return: dict
"""
if show_all_args:
from jina.parsers import helper
helper._SHOW_ALL_ARGS, old_val = True, helper._SHOW_ALL_ARGS
from jina import __version__
from jina.parsers import get_main_parser
all_d = {
'name': 'Jina',
'description': 'Build multimodal AI services via cloud native technologies',
'license': 'Apache 2.0',
'vendor': 'Jina AI Limited',
'source': 'https://github.com/jina-ai/jina/tree/'
+ os.environ.get('JINA_VCS_VERSION', 'master'),
'url': 'https://jina.ai',
'docs': 'https://docs.jina.ai',
'authors': 'dev-team@jina.ai',
'version': __version__,
'methods': [],
'revision': os.environ.get('JINA_VCS_VERSION'),
}
def get_p(p, parent_d):
parsers = p()._actions[-1].choices
if parsers:
for p_name in parsers.keys():
d = {'name': p_name, 'options': [], 'help': parsers[p_name].description}
for ddd in _export_parser_args(
lambda *x: p()._actions[-1].choices[p_name], type_as_str=True
):
d['options'].append(ddd)
if not d['options']:
d['methods'] = []
get_p(lambda *x: parsers[p_name], d)
parent_d['methods'].append(d)
get_p(get_main_parser, all_d)
if show_all_args:
helper._SHOW_ALL_ARGS = old_val
return all_d
def _export_parser_args(parser_fn, type_as_str: bool = False, **kwargs):
from argparse import _StoreAction, _StoreTrueAction
from jina.enums import BetterEnum
from jina.parsers.helper import _SHOW_ALL_ARGS, CastToIntAction, KVAppendAction
port_attr = ('help', 'choices', 'default', 'required', 'option_strings', 'dest')
parser = parser_fn(**kwargs)
parser2 = parser_fn(**kwargs)
random_dest = set()
for a, b in zip(parser._actions, parser2._actions):
if a.default != b.default:
random_dest.add(a.dest)
for a in parser._actions:
if isinstance(
a,
(
_StoreAction,
_StoreTrueAction,
KVAppendAction,
CastToIntAction,
CastHostAction,
),
):
if not _SHOW_ALL_ARGS and a.help == argparse.SUPPRESS:
continue
ddd = {p: getattr(a, p) for p in port_attr}
if isinstance(a, _StoreTrueAction):
ddd['type'] = bool
elif isinstance(a, KVAppendAction):
ddd['type'] = dict
elif isinstance(a, CastToIntAction):
ddd['type'] = int
elif isinstance(a, CastHostAction):
ddd['type'] = str
else:
ddd['type'] = a.type
if ddd['choices']:
ddd['choices'] = [
str(k) if isinstance(k, BetterEnum) else k for k in ddd['choices']
]
ddd['type'] = str
if isinstance(ddd['default'], BetterEnum):
ddd['default'] = str(ddd['default'])
ddd['type'] = str
if ddd['type'] == str and (a.nargs == '*' or a.nargs == '+'):
ddd['type'] = List[str]
else:
continue
if a.dest in random_dest:
ddd['default_random'] = True
from jina.helper import random_identity, random_port
if isinstance(a.default, str):
ddd['default_factory'] = random_identity.__name__
elif isinstance(a.default, int):
ddd['default_factory'] = random_port.__name__
else:
ddd['default_random'] = False
if type_as_str:
ddd['type'] = getattr(ddd['type'], '__name__', str(ddd['type']))
ddd['name'] = ddd.pop('dest')
yield ddd
|
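The pair above exports Jina's argparse tree into a nested dict of methods and options. As a point of reference, here is a minimal, hand-written sketch (not taken from Jina) of how a dict with that shape could be walked; `summarize_api` and the sample dict are illustrative only.

```python
# Illustrative only: walk a dict shaped like the api_to_dict() output above and
# print each CLI method with the number of exported options.
def summarize_api(d: dict, indent: int = 0) -> None:
    for method in d.get('methods', []):
        print(' ' * indent + f"{method['name']}: {len(method.get('options', []))} options")
        # sub-commands, when present, are nested under another 'methods' list
        if 'methods' in method:
            summarize_api(method, indent + 2)

summarize_api({'methods': [{'name': 'flow', 'options': [{}, {}], 'methods': []}]})
```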
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .videofolder import videofolder
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: list[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: dict[str, tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: dict[str, list[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
# Used to filter data files based on file names
_MODULE_TO_METADATA_FILE_NAMES: Dict[str, List[str]] = {}
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_METADATA_FILE_NAMES[_module] = []
_MODULE_TO_METADATA_FILE_NAMES["imagefolder"] = imagefolder.ImageFolder.METADATA_FILENAMES
_MODULE_TO_METADATA_FILE_NAMES["audiofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES
_MODULE_TO_METADATA_FILE_NAMES["videofolder"] = imagefolder.ImageFolder.METADATA_FILENAMES
|
import inspect
import re
from typing import Dict, List, Tuple
from huggingface_hub.utils import insecure_hashlib
from .arrow import arrow
from .audiofolder import audiofolder
from .cache import cache
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql
from .text import text
from .videofolder import videofolder
from .webdataset import webdataset
from .xml import xml
def _hash_python_lines(lines: list[str]) -> str:
filtered_lines = []
for line in lines:
line = re.sub(r"#.*", "", line) # remove comments
if line:
filtered_lines.append(line)
full_str = "\n".join(filtered_lines)
# Make a hash from all this code
full_bytes = full_str.encode("utf-8")
return insecure_hashlib.sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
"videofolder": (videofolder.__name__, _hash_python_lines(inspect.getsource(videofolder).splitlines())),
"webdataset": (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines())),
"xml": (xml.__name__, _hash_python_lines(inspect.getsource(xml).splitlines())),
}
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES_2_15_HASHES = {
"csv": "eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d",
"json": "8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96",
"pandas": "3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202",
"parquet": "ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1",
"arrow": "74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137",
"text": "c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34",
"imagefolder": "7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5",
"audiofolder": "d3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c",
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE: dict[str, tuple[str, dict]] = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
# ndjson is no longer maintained (see: https://github.com/ndjson/ndjson-spec/issues/35#issuecomment-1285673417)
".ndjson": ("json", {}),
".parquet": ("parquet", {}),
".geoparquet": ("parquet", {}),
".gpq": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
".tar": ("webdataset", {}),
".xml": ("xml", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("videofolder", {}) for ext in videofolder.VideoFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder", "videofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: dict[str, list[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
for _module in _MODULE_TO_EXTENSIONS:
_MODULE_TO_EXTENSIONS[_module].append(".zip")
|
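The module above keys the dataset-builder cache on a hash of each builder's source with comments stripped. A self-contained sketch of the same idea, using the standard library's `hashlib` in place of `huggingface_hub`'s `insecure_hashlib`:

```python
# Sketch of the comment-stripping + SHA-256 hashing behind _PACKAGED_DATASETS_MODULES;
# hashlib stands in for huggingface_hub.utils.insecure_hashlib here.
import hashlib
import re

def hash_python_lines(lines: list[str]) -> str:
    filtered = [re.sub(r"#.*", "", line) for line in lines]  # drop comments
    filtered = [line for line in filtered if line]           # drop now-empty lines
    return hashlib.sha256("\n".join(filtered).encode("utf-8")).hexdigest()

print(hash_python_lines(["x = 1  # a comment", "", "y = x + 1"]))
```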
_base_ = './mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_6.4gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')),
neck=dict(
type='FPN',
in_channels=[168, 392, 784, 1624],
out_channels=256,
num_outs=5))
|
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
backbone=dict(
type='RegNet',
arch='regnetx_6.4gf',
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')),
neck=dict(
type='FPN',
in_channels=[168, 392, 784, 1624],
out_channels=256,
num_outs=5))
|
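The config pair above only overrides the backbone arch and the FPN `in_channels`; everything else is inherited from the `_base_` file. A usage sketch, assuming an MMDetection 3.x environment with `mmengine` installed and the config saved locally under a hypothetical filename:

```python
# Hypothetical filename; the point is that Config.fromfile resolves _base_ inheritance.
from mmengine.config import Config

cfg = Config.fromfile('mask-rcnn_regnetx-6.4gf_fpn_1x_coco.py')
print(cfg.model.backbone.arch)      # 'regnetx_6.4gf'
print(cfg.model.neck.in_channels)   # [168, 392, 784, 1624]
```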
import argparse
import copy
import os
import re
import sys
import boto3
import botocore
from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, ".."))
from common_blocks.utils import create_or_update_stack, wait
TEMPLATE_URL = "https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml"
def get_availability_zones(*, aws_region):
client = boto3.client("ec2", region_name=aws_region)
r = client.describe_availability_zones(
Filters=[
{"Name": "region-name", "Values": [aws_region]},
{"Name": "zone-type", "Values": ["availability-zone"]},
]
)
return sorted([x["ZoneName"] for x in r["AvailabilityZones"]])
def get_default_vpc(*, aws_region):
ec2 = boto3.resource("ec2", region_name=aws_region)
default_vpc_id = None
for x in ec2.vpcs.filter(Filters=[{"Name": "is-default", "Values": ["true"]}]):
return x
# Create default VPC if not exist
client = boto3.client("ec2", region_name=aws_region)
r = client.create_default_vpc()
default_vpc_id = r["Vpc"]["VpcId"]
return ec2.Vpc(default_vpc_id)
def format_params(args, *, stack_id, agent_iam_policy):
default_vpc = get_default_vpc(aws_region=args.aws_region)
azs = get_availability_zones(aws_region=args.aws_region)
# For each of the first two availability zones (AZs), choose the default subnet
subnets = [
x.id
for x in default_vpc.subnets.filter(
Filters=[
{"Name": "default-for-az", "Values": ["true"]},
{"Name": "availability-zone", "Values": azs[:2]},
]
)
]
assert len(subnets) == 2
params = copy.deepcopy(STACK_PARAMS[stack_id])
params["ImageId"] = AMI_ID[stack_id][args.aws_region]
params["BuildkiteQueue"] = stack_id
params["CostAllocationTagValue"] = f"buildkite-{stack_id}"
params["BuildkiteAgentToken"] = args.agent_token
params["VpcId"] = default_vpc.id
params["Subnets"] = ",".join(subnets)
params["ManagedPolicyARNs"] = agent_iam_policy
params.update(COMMON_STACK_PARAMS)
return [{"ParameterKey": k, "ParameterValue": v} for k, v in params.items()]
def get_full_stack_id(stack_id):
return f"buildkite-{stack_id}-autoscaling-group"
def create_agent_iam_policy(args, *, client):
policy_stack_name = "buildkite-agent-iam-policy"
print(f"Creating stack {policy_stack_name} for agent IAM policy...")
with open(
os.path.join(current_dir, "agent-iam-policy-template.yml"),
encoding="utf-8",
) as f:
policy_template = f.read()
promise = create_or_update_stack(
args, client=client, stack_name=policy_stack_name, template_body=policy_template
)
wait(promise, client=client)
cf = boto3.resource("cloudformation", region_name=args.aws_region)
policy = cf.StackResource(policy_stack_name, "BuildkiteAgentManagedPolicy")
return policy.physical_resource_id
def main(args):
client = boto3.client("cloudformation", region_name=args.aws_region)
agent_iam_policy = create_agent_iam_policy(args, client=client)
promises = []
for stack_id in AMI_ID:
stack_id_full = get_full_stack_id(stack_id)
print(f"Creating elastic CI stack {stack_id_full}...")
params = format_params(
args, stack_id=stack_id, agent_iam_policy=agent_iam_policy
)
promise = create_or_update_stack(
args,
client=client,
stack_name=stack_id_full,
template_url=TEMPLATE_URL,
params=params,
)
promises.append(promise)
print(f"CI stack {stack_id_full} is in progress in the background")
for promise in promises:
wait(promise, client=client)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--aws-region", type=str, required=True)
parser.add_argument("--agent-token", type=str, required=True)
args = parser.parse_args()
main(args)
|
import argparse
import copy
import os
import re
import sys
import boto3
import botocore
from metadata import AMI_ID, COMMON_STACK_PARAMS, STACK_PARAMS
current_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(current_dir, ".."))
from common_blocks.utils import create_or_update_stack, wait
TEMPLATE_URL = "https://s3.amazonaws.com/buildkite-aws-stack/latest/aws-stack.yml"
def get_availability_zones(*, aws_region):
client = boto3.client("ec2", region_name=aws_region)
r = client.describe_availability_zones(
Filters=[
{"Name": "region-name", "Values": [aws_region]},
{"Name": "zone-type", "Values": ["availability-zone"]},
]
)
return sorted([x["ZoneName"] for x in r["AvailabilityZones"]])
def get_default_vpc(*, aws_region):
ec2 = boto3.resource("ec2", region_name=aws_region)
default_vpc_id = None
for x in ec2.vpcs.filter(Filters=[{"Name": "is-default", "Values": ["true"]}]):
return x
# Create default VPC if not exist
client = boto3.client("ec2", region_name=aws_region)
r = client.create_default_vpc()
default_vpc_id = r["Vpc"]["VpcId"]
return ec2.Vpc(default_vpc_id)
def format_params(args, *, stack_id, agent_iam_policy):
default_vpc = get_default_vpc(aws_region=args.aws_region)
azs = get_availability_zones(aws_region=args.aws_region)
# For each of the first two availability zones (AZs), choose the default subnet
subnets = [
x.id
for x in default_vpc.subnets.filter(
Filters=[
{"Name": "default-for-az", "Values": ["true"]},
{"Name": "availability-zone", "Values": azs[:2]},
]
)
]
assert len(subnets) == 2
params = copy.deepcopy(STACK_PARAMS[stack_id])
params["ImageId"] = AMI_ID[stack_id][args.aws_region]
params["BuildkiteQueue"] = stack_id
params["CostAllocationTagValue"] = f"buildkite-{stack_id}"
params["BuildkiteAgentToken"] = args.agent_token
params["VpcId"] = default_vpc.id
params["Subnets"] = ",".join(subnets)
params["ManagedPolicyARN"] = agent_iam_policy
params.update(COMMON_STACK_PARAMS)
return [{"ParameterKey": k, "ParameterValue": v} for k, v in params.items()]
def get_full_stack_id(stack_id):
return f"buildkite-{stack_id}-autoscaling-group"
def create_agent_iam_policy(args, *, client):
policy_stack_name = "buildkite-agent-iam-policy"
print(f"Creating stack {policy_stack_name} for agent IAM policy...")
with open(
os.path.join(current_dir, "agent-iam-policy-template.yml"),
encoding="utf-8",
) as f:
policy_template = f.read()
promise = create_or_update_stack(
args, client=client, stack_name=policy_stack_name, template_body=policy_template
)
wait(promise, client=client)
cf = boto3.resource("cloudformation", region_name=args.aws_region)
policy = cf.StackResource(policy_stack_name, "BuildkiteAgentManagedPolicy")
return policy.physical_resource_id
def main(args):
client = boto3.client("cloudformation", region_name=args.aws_region)
agent_iam_policy = create_agent_iam_policy(args, client=client)
promises = []
for stack_id in AMI_ID:
stack_id_full = get_full_stack_id(stack_id)
print(f"Creating elastic CI stack {stack_id_full}...")
params = format_params(
args, stack_id=stack_id, agent_iam_policy=agent_iam_policy
)
promise = create_or_update_stack(
args,
client=client,
stack_name=stack_id_full,
template_url=TEMPLATE_URL,
params=params,
)
promises.append(promise)
print(f"CI stack {stack_id_full} is in progress in the background")
for promise in promises:
wait(promise, client=client)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--aws-region", type=str, required=True)
parser.add_argument("--agent-token", type=str, required=True)
args = parser.parse_args()
main(args)
|
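One detail worth pulling out of `format_params()` above: CloudFormation expects stack parameters as a list of key/value records rather than a plain mapping. A tiny standalone illustration with made-up values:

```python
# Illustrative values only: convert a plain dict into the
# [{"ParameterKey": ..., "ParameterValue": ...}, ...] shape CloudFormation expects.
params = {"BuildkiteQueue": "linux-amd64", "VpcId": "vpc-0123456789abcdef0"}
cf_params = [{"ParameterKey": k, "ParameterValue": v} for k, v in params.items()]
print(cf_params)
```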
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend as backend
from keras.src.backend.config import (
disable_flash_attention as disable_flash_attention,
)
from keras.src.backend.config import (
enable_flash_attention as enable_flash_attention,
)
from keras.src.backend.config import epsilon as epsilon
from keras.src.backend.config import floatx as floatx
from keras.src.backend.config import image_data_format as image_data_format
from keras.src.backend.config import (
is_flash_attention_enabled as is_flash_attention_enabled,
)
from keras.src.backend.config import set_epsilon as set_epsilon
from keras.src.backend.config import set_floatx as set_floatx
from keras.src.backend.config import (
set_image_data_format as set_image_data_format,
)
from keras.src.dtype_policies.dtype_policy import dtype_policy as dtype_policy
from keras.src.dtype_policies.dtype_policy import (
set_dtype_policy as set_dtype_policy,
)
from keras.src.saving.serialization_lib import (
enable_unsafe_deserialization as enable_unsafe_deserialization,
)
from keras.src.utils.backend_utils import set_backend as set_backend
from keras.src.utils.io_utils import (
disable_interactive_logging as disable_interactive_logging,
)
from keras.src.utils.io_utils import (
enable_interactive_logging as enable_interactive_logging,
)
from keras.src.utils.io_utils import (
is_interactive_logging_enabled as is_interactive_logging_enabled,
)
from keras.src.utils.traceback_utils import (
disable_traceback_filtering as disable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
enable_traceback_filtering as enable_traceback_filtering,
)
from keras.src.utils.traceback_utils import (
is_traceback_filtering_enabled as is_traceback_filtering_enabled,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.backend.config import backend
from keras.src.backend.config import disable_flash_attention
from keras.src.backend.config import enable_flash_attention
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import is_flash_attention_enabled
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.dtype_policies.dtype_policy import dtype_policy
from keras.src.dtype_policies.dtype_policy import set_dtype_policy
from keras.src.saving.serialization_lib import enable_unsafe_deserialization
from keras.src.utils.backend_utils import set_backend
from keras.src.utils.io_utils import disable_interactive_logging
from keras.src.utils.io_utils import enable_interactive_logging
from keras.src.utils.io_utils import is_interactive_logging_enabled
from keras.src.utils.traceback_utils import disable_traceback_filtering
from keras.src.utils.traceback_utils import enable_traceback_filtering
from keras.src.utils.traceback_utils import is_traceback_filtering_enabled
|
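The autogenerated shim above is what surfaces these helpers under the public `keras.config` namespace. A short usage sketch, assuming Keras 3 is installed:

```python
# Usage sketch for the re-exported helpers (Keras 3).
import keras

print(keras.config.backend())            # e.g. 'tensorflow', 'jax' or 'torch'
print(keras.config.floatx())             # default float dtype, typically 'float32'
keras.config.set_floatx("float32")       # setter counterpart of floatx()
print(keras.config.image_data_format())  # 'channels_last' by default
```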
import logging
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager
from backend.util.request import Requests
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME = ProviderName.SLANT3D
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
# Slant3D's API doesn't use events list, just register for all order updates
payload = {"endPoint": ingress_url}
response = Requests().post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
import logging
import requests
from fastapi import Request
from backend.data import integrations
from backend.data.model import APIKeyCredentials, Credentials
from backend.integrations.providers import ProviderName
from backend.integrations.webhooks._base import BaseWebhooksManager
logger = logging.getLogger(__name__)
class Slant3DWebhooksManager(BaseWebhooksManager):
"""Manager for Slant3D webhooks"""
PROVIDER_NAME = ProviderName.SLANT3D
BASE_URL = "https://www.slant3dapi.com/api"
async def _register_webhook(
self,
credentials: Credentials,
webhook_type: str,
resource: str,
events: list[str],
ingress_url: str,
secret: str,
) -> tuple[str, dict]:
"""Register a new webhook with Slant3D"""
if not isinstance(credentials, APIKeyCredentials):
raise ValueError("API key is required to register a webhook")
headers = {
"api-key": credentials.api_key.get_secret_value(),
"Content-Type": "application/json",
}
# Slant3D's API doesn't use events list, just register for all order updates
payload = {"endPoint": ingress_url}
response = requests.post(
f"{self.BASE_URL}/customer/webhookSubscribe", headers=headers, json=payload
)
if not response.ok:
error = response.json().get("error", "Unknown error")
raise RuntimeError(f"Failed to register webhook: {error}")
webhook_config = {
"endpoint": ingress_url,
"provider": self.PROVIDER_NAME,
"events": ["order.shipped"], # Currently the only supported event
"type": webhook_type,
}
return "", webhook_config
@classmethod
async def validate_payload(
cls, webhook: integrations.Webhook, request: Request
) -> tuple[dict, str]:
"""Validate incoming webhook payload from Slant3D"""
payload = await request.json()
# Validate required fields from Slant3D API spec
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
missing_fields = [field for field in required_fields if field not in payload]
if missing_fields:
raise ValueError(f"Missing required fields: {', '.join(missing_fields)}")
# Normalize payload structure
normalized_payload = {
"orderId": payload["orderId"],
"status": payload["status"],
"trackingNumber": payload["trackingNumber"],
"carrierCode": payload["carrierCode"],
}
# Currently Slant3D only sends shipping notifications
# Convert status to lowercase for event format compatibility
event_type = f"order.{payload['status'].lower()}"
return normalized_payload, event_type
async def _deregister_webhook(
self, webhook: integrations.Webhook, credentials: Credentials
) -> None:
"""
Note: Slant3D API currently doesn't provide a deregistration endpoint.
This would need to be handled through support.
"""
# Log warning since we can't properly deregister
logger.warning(
f"Warning: Manual deregistration required for webhook {webhook.id}"
)
pass
|
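The `validate_payload()` logic above boils down to a required-field check plus an event name derived from the order status. A standalone sketch with a fabricated payload:

```python
# Standalone sketch of the validation performed above; the payload is made up.
required_fields = ["orderId", "status", "trackingNumber", "carrierCode"]
payload = {"orderId": "123", "status": "SHIPPED",
           "trackingNumber": "1Z999AA10123456784", "carrierCode": "ups"}
missing = [field for field in required_fields if field not in payload]
if missing:
    raise ValueError(f"Missing required fields: {', '.join(missing)}")
event_type = f"order.{payload['status'].lower()}"
print(event_type)  # 'order.shipped'
```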
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
A M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `Xred` assigned to
each of the cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from ..base import TransformerMixin
from ..utils import metadata_routing
from ..utils.deprecation import _deprecate_Xt_in_inverse_transform
from ..utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
# This prevents ``set_split_inverse_transform`` to be generated for the
# non-standard ``Xt`` arg on ``inverse_transform``.
# TODO(1.7): remove when Xt is removed for inverse_transform.
__metadata_request__inverse_transform = {"Xt": metadata_routing.UNUSED}
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
A M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, X=None, *, Xt=None):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Xt : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
.. deprecated:: 1.5
`Xt` was deprecated in 1.5 and will be removed in 1.7. Use `X` instead.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `Xred` assigned to
each of the cluster of samples.
"""
X = _deprecate_Xt_in_inverse_transform(X, Xt)
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return X[..., inverse]
|
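The "fast way to compute the mean of grouped features" in `transform()` relies on `np.bincount` with per-row weights: summing each row's values per cluster label and dividing by the cluster sizes gives the per-cluster means. A small self-contained check of that identity:

```python
# Self-contained check of the np.bincount pooling trick used in transform():
# per-cluster weighted sums divided by cluster sizes equal per-cluster means.
import numpy as np

labels = np.array([0, 0, 1, 2, 1])             # cluster label of each feature
X = np.arange(10, dtype=float).reshape(2, 5)   # 2 samples x 5 features
size = np.bincount(labels)                     # number of features per cluster
pooled = np.array([np.bincount(labels, weights=X[i]) / size for i in range(X.shape[0])])
direct = np.array([[X[i, labels == k].mean() for k in np.unique(labels)]
                   for i in range(X.shape[0])])
assert np.allclose(pooled, direct)
print(pooled)  # [[0.5 3.  3. ] [5.5 8.  8. ]]
```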
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to get the list of folders under `tests/models` and split the list into `NUM_SLICES` splits.
The main use case is a GitHub Actions workflow file calling this script to get the (nested) list of folders allowing it
to split the list of jobs to run into multiple slices each containing a smaller number of jobs. This way, we can bypass
the maximum of 256 jobs in a matrix.
See the `setup` and `run_models_gpu` jobs defined in the workflow file `.github/workflows/self-scheduled.yml` for more
details.
Usage:
This script is required to be run under `tests` folder of `transformers` root directory.
Assume we are under `transformers` root directory:
```bash
cd tests
python ../utils/split_model_tests.py --num_splits 64
```
"""
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--num_splits",
type=int,
default=1,
help="the number of splits into which the (flat) list of folders will be split.",
)
args = parser.parse_args()
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
d = d2 + d1
num_jobs = len(d)
num_jobs_per_splits = num_jobs // args.num_splits
model_splits = []
end = 0
for idx in range(args.num_splits):
start = end
end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
model_splits.append(d[start:end])
print(model_splits)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to get the list of folders under `tests/models` and split the list into `NUM_SLICES` splits.
The main use case is a GitHub Actions workflow file calling this script to get the (nested) list of folders allowing it
to split the list of jobs to run into multiple slices each containing a smaller number of jobs. This way, we can bypass
the maximum of 256 jobs in a matrix.
See the `setup` and `run_models_gpu` jobs defined in the workflow file `.github/workflows/self-scheduled.yml` for more
details.
Usage:
This script is required to be run under `tests` folder of `transformers` root directory.
Assume we are under `transformers` root directory:
```bash
cd tests
python ../utils/split_model_tests.py --num_splits 64
```
"""
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--num_splits",
type=int,
default=1,
help="the number of splits into which the (flat) list of folders will be split.",
)
args = parser.parse_args()
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
d = d2 + d1
num_jobs = len(d)
num_jobs_per_splits = num_jobs // args.num_splits
model_splits = []
end = 0
for idx in range(args.num_splits):
start = end
end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
model_splits.append(d[start:end])
print(model_splits)
|
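The split loop above hands out the remainder of `num_jobs // num_splits` one job at a time to the first splits. A worked example of the same arithmetic on a toy list:

```python
# Worked example of the split arithmetic above: 10 jobs over 3 splits gives
# sizes 4, 3, 3 because the remainder (10 % 3 == 1) goes to the first split.
d = [f"job_{i}" for i in range(10)]
num_splits = 3
per_split, remainder = divmod(len(d), num_splits)
splits, end = [], 0
for idx in range(num_splits):
    start = end
    end = start + per_split + (1 if idx < remainder else 0)
    splits.append(d[start:end])
print([len(s) for s in splits])  # [4, 3, 3]
```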
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except (ModuleNotFoundError, AttributeError):
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
import pytest
import datasets
import datasets.config
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"]):
continue
item.add_marker(pytest.mark.unit)
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
test_hf_datasets_cache = test_hf_cache_home / "datasets"
test_hf_modules_cache = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True)
def disable_implicit_token(monkeypatch):
monkeypatch.setattr("huggingface_hub.constants.HF_HUB_DISABLE_IMPLICIT_TOKEN", True)
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
try:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
except (ModuleNotFoundError, AttributeError):
pass
@pytest.fixture(autouse=True, scope="session")
def zero_time_out_for_remote_code():
datasets.config.TIME_OUT_REMOTE_CODE = 0
|
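The fixtures above lean on pytest's `monkeypatch` and temporary paths to keep each test run's cache isolated. A minimal sketch of the same pattern (the environment variable name is just an example, not taken from the conftest above):

```python
# Minimal autouse fixture in the same style: point a cache location at a
# per-test temporary directory via an environment variable.
import pytest

@pytest.fixture(autouse=True)
def isolated_cache(tmp_path, monkeypatch):
    monkeypatch.setenv("HF_DATASETS_CACHE", str(tmp_path / "datasets"))
```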
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
FlopsLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
SpladeLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
"FlopsLoss",
"SpladeLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
from __future__ import annotations
from sentence_transformers.sparse_encoder.data_collator import SparseEncoderDataCollator
from sentence_transformers.sparse_encoder.evaluation import (
SparseBinaryClassificationEvaluator,
SparseEmbeddingSimilarityEvaluator,
SparseInformationRetrievalEvaluator,
SparseMSEEvaluator,
SparseNanoBEIREvaluator,
SparseRerankingEvaluator,
SparseTranslationEvaluator,
SparseTripletEvaluator,
)
from sentence_transformers.sparse_encoder.losses import (
CSRLoss,
CSRReconstructionLoss,
SparseAnglELoss,
SparseCachedGISTEmbedLoss,
SparseCachedMultipleNegativesRankingLoss,
SparseCoSENTLoss,
SparseCosineSimilarityLoss,
SparseDistillKLDivLoss,
SparseGISTEmbedLoss,
SparseMarginMSELoss,
SparseMSELoss,
SparseMultipleNegativesRankingLoss,
SparseTripletLoss,
)
from sentence_transformers.sparse_encoder.models import (
CSRSparsity,
MLMTransformer,
SpladePooling,
)
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
__all__ = [
# Core components
"SparseEncoder",
"SparseEncoderDataCollator",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
# Models
"CSRSparsity",
"MLMTransformer",
"SpladePooling",
# Losses
"CSRLoss",
"CSRReconstructionLoss",
"SparseMultipleNegativesRankingLoss",
"SparseCoSENTLoss",
"SparseTripletLoss",
"SparseCachedMultipleNegativesRankingLoss",
"SparseMarginMSELoss",
"SparseGISTEmbedLoss",
"SparseCachedGISTEmbedLoss",
"SparseCosineSimilarityLoss",
"SparseMSELoss",
"SparseAnglELoss",
"SparseDistillKLDivLoss",
# Evaluators
"SparseBinaryClassificationEvaluator",
"SparseEmbeddingSimilarityEvaluator",
"SparseInformationRetrievalEvaluator",
"SparseMSEEvaluator",
"SparseNanoBEIREvaluator",
"SparseTranslationEvaluator",
"SparseRerankingEvaluator",
"SparseTripletEvaluator",
]
# TODO : Complete the SparseEncoder class
# TODO : Add tests for all the components
# TODO : Add the equivalent of the quantization file for the sparse encoder
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# comment out the code below to use different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='RandomResize',
scale=image_size,
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
max_epochs = 25
train_cfg = dict(
type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# only keep latest 2 checkpoints
default_hooks = dict(checkpoint=dict(max_keep_ckpts=2))
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
|
_base_ = '../_base_/default_runtime.py'
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
image_size = (1024, 1024)
file_client_args = dict(backend='disk')
# comment out the code below to use different file client
# file_client_args = dict(
# backend='petrel',
# path_mapping=dict({
# './data/': 's3://openmmlab/datasets/detection/',
# 'data/': 's3://openmmlab/datasets/detection/'
# }))
train_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomResize', scale=image_size, ratio_range=(0.1, 2.0)),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=image_size,
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
test_pipeline = [
dict(type='LoadImageFromFile', file_client_args=file_client_args),
dict(type='Resize', scale=(1333, 800), keep_ratio=True),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
# Use RepeatDataset to speed up training
train_dataloader = dict(
batch_size=2,
num_workers=2,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
type='RepeatDataset',
times=4, # simply change this from 2 to 16 for 50e - 400e training.
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='train2017/'),
filter_cfg=dict(filter_empty_gt=True, min_size=32),
pipeline=train_pipeline)))
val_dataloader = dict(
batch_size=1,
num_workers=2,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='val2017/'),
test_mode=True,
pipeline=test_pipeline))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'annotations/instances_val2017.json',
metric=['bbox', 'segm'],
format_only=False)
test_evaluator = val_evaluator
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=25, val_interval=5)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
# optimizer assumes bs=64
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00004))
# learning rate
param_scheduler = [
dict(
type='LinearLR', start_factor=0.067, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
|
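The `times` comment in the `RepeatDataset` block above is easy to miss: with `max_epochs` fixed at 25, the effective number of passes over the raw COCO data scales linearly with `times`. A quick arithmetic check:

```python
# times=2 -> 50 effective epochs, times=16 -> 400, matching the inline comment.
max_epochs = 25
for times in (2, 4, 16):
    print(f"times={times}: {times * max_epochs} effective epochs")
```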
from typing import Tuple, Iterator
import pytest
import requests
import itertools
from docarray import DocumentArray, Document
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
'dynamic_ef_max': 300,
'dynamic_ef_factor': 4,
'vector_cache_max_objects': 1000000,
'flat_search_cutoff': 20000,
'cleanup_interval_seconds': 1000,
'skip': True,
'distance': 'l2-squared',
},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMin') == 50
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMax') == 300
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfFactor') == 4
assert (
main_class.get('vectorIndexConfig', {}).get('vectorCacheMaxObjects') == 1000000
)
assert main_class.get('vectorIndexConfig', {}).get('flatSearchCutoff') == 20000
assert main_class.get('vectorIndexConfig', {}).get('cleanupIntervalSeconds') == 1000
assert main_class.get('vectorIndexConfig', {}).get('skip') is True
assert main_class.get('vectorIndexConfig', {}).get('distance') == 'l2-squared'
def test_weaviate_da_w_protobuff(start_storage):
N = 10
index = DocumentArray(
storage='weaviate',
config={
'name': 'Test',
'columns': [('price', 'int')],
},
)
docs = DocumentArray([Document(tags={'price': i}) for i in range(N)])
docs = DocumentArray.from_protobuf(
docs.to_protobuf()
) # same as streaming the da in jina
index.extend(docs)
assert len(index) == N
@pytest.mark.parametrize('type_da', [int, float, str])
@pytest.mark.parametrize('type_column', ['int', 'float', 'str'])
def test_cast_columns_weaviate(start_storage, type_da, type_column, request):
test_id = request.node.callspec.id.replace(
'-', ''
) # remove '-' from the test id for the weaviate name
N = 10
index = DocumentArray(
storage='weaviate',
config={
'name': f'Test{test_id}',
'columns': [('price', type_column)],
},
)
docs = DocumentArray([Document(tags={'price': type_da(i)}) for i in range(10)])
index.extend(docs)
assert len(index) == N
@pytest.mark.parametrize('type_da', [int, float, str])
@pytest.mark.parametrize('type_column', ['int', 'float', 'str'])
def test_cast_columns_annlite(start_storage, type_da, type_column):
N = 10
index = DocumentArray(
storage='annlite',
config={
'n_dim': 3,
'columns': [('price', type_column)],
},
)
docs = DocumentArray([Document(tags={'price': type_da(i)}) for i in range(10)])
index.extend(docs)
assert len(index) == N
@pytest.mark.parametrize('type_da', [int, float, str])
@pytest.mark.parametrize('type_column', ['int', 'float', 'str'])
def test_cast_columns_qdrant(start_storage, type_da, type_column, request):
test_id = request.node.callspec.id.replace(
'-', ''
    ) # remove '-' from the test id for the qdrant collection name
N = 10
index = DocumentArray(
storage='qdrant',
config={
'collection_name': f'test{test_id}',
'n_dim': 3,
'columns': [('price', type_column)],
},
)
docs = DocumentArray([Document(tags={'price': type_da(i)}) for i in range(10)])
index.extend(docs)
assert len(index) == N
|
import requests
from docarray import DocumentArray
def test_weaviate_hnsw(start_storage):
da = DocumentArray(
storage='weaviate',
config={
'n_dim': 100,
'ef': 100,
'ef_construction': 100,
'max_connections': 16,
'dynamic_ef_min': 50,
'dynamic_ef_max': 300,
'dynamic_ef_factor': 4,
'vector_cache_max_objects': 1000000,
'flat_search_cutoff': 20000,
'cleanup_interval_seconds': 1000,
'skip': True,
'distance': 'l2-squared',
},
)
result = requests.get('http://localhost:8080/v1/schema').json()
classes = result.get('classes', [])
main_class = list(
filter(lambda class_element: class_element['class'] == da._config.name, classes)
)
assert len(main_class) == 1
main_class = main_class[0]
assert main_class.get('vectorIndexConfig', {}).get('maxConnections') == 16
assert main_class.get('vectorIndexConfig', {}).get('efConstruction') == 100
assert main_class.get('vectorIndexConfig', {}).get('ef') == 100
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMin') == 50
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfMax') == 300
assert main_class.get('vectorIndexConfig', {}).get('dynamicEfFactor') == 4
assert (
main_class.get('vectorIndexConfig', {}).get('vectorCacheMaxObjects') == 1000000
)
assert main_class.get('vectorIndexConfig', {}).get('flatSearchCutoff') == 20000
assert main_class.get('vectorIndexConfig', {}).get('cleanupIntervalSeconds') == 1000
assert main_class.get('vectorIndexConfig', {}).get('skip') is True
assert main_class.get('vectorIndexConfig', {}).get('distance') == 'l2-squared'
|
import os
from functools import lru_cache
from subprocess import CalledProcessError, run
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000 frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2 # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 20ms per audio token
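# Derived values for reference: N_FRAMES == 3000, FRAMES_PER_SECOND == 100,
# TOKENS_PER_SECOND == 50 (i.e. one token per two mel frames).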
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
# This launches a subprocess to decode audio while down-mixing
# and resampling as necessary. Requires the ffmpeg CLI in PATH.
# fmt: off
cmd = [
"ffmpeg",
"-nostdin",
"-threads", "0",
"-i", file,
"-f", "s16le",
"-ac", "1",
"-acodec", "pcm_s16le",
"-ar", str(sr),
"-"
]
# fmt: on
try:
out = run(cmd, capture_output=True, check=True).stdout
except CalledProcessError as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
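# Example usage (illustrative; assumes ffmpeg is on PATH and "speech.wav" exists):
#     waveform = load_audio("speech.wav")  # float32 mono waveform sampled at 16 kHz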
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(
dim=axis, index=torch.arange(length, device=array.device)
)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
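# Example (illustrative): pad_or_trim(np.zeros(100_000)).shape == (480000,)
# i.e. shorter inputs are zero-padded up to the 30-second window, longer ones are trimmed.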
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting an STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; the matrix was saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
with np.load(filters_path, allow_pickle=False) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
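# The returned filterbank has shape (n_mels, N_FFT // 2 + 1) == (80, 201).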
def log_mel_spectrogram(
audio: Union[str, np.ndarray, torch.Tensor],
n_mels: int = N_MELS,
padding: int = 0,
device: Optional[Union[str, torch.device]] = None,
):
"""
    Compute the log-Mel spectrogram of an audio waveform
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
padding: int
Number of zero samples to pad to the right
device: Optional[Union[str, torch.device]]
If given, the audio tensor is moved to this device before STFT
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
if device is not None:
audio = audio.to(device)
if padding > 0:
audio = F.pad(audio, (0, padding))
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
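# Example (illustrative, hypothetical file):
#     mel = log_mel_spectrogram("speech.wav")  # torch.Tensor of shape (80, n_frames)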
|
import os
from functools import lru_cache
from subprocess import CalledProcessError, run
from typing import Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from .utils import exact_div
# hard-coded audio hyperparameters
SAMPLE_RATE = 16000
N_FFT = 400
N_MELS = 80
HOP_LENGTH = 160
CHUNK_LENGTH = 30
N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk
N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000 frames in a mel spectrogram input
N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2 # the initial convolutions have stride 2
FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame
TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 20ms per audio token
def load_audio(file: str, sr: int = SAMPLE_RATE):
"""
Open an audio file and read as mono waveform, resampling as necessary
Parameters
----------
file: str
The audio file to open
sr: int
The sample rate to resample the audio if necessary
Returns
-------
A NumPy array containing the audio waveform, in float32 dtype.
"""
# This launches a subprocess to decode audio while down-mixing
# and resampling as necessary. Requires the ffmpeg CLI in PATH.
# fmt: off
cmd = [
"ffmpeg",
"-nostdin",
"-threads", "0",
"-i", file,
"-f", "s16le",
"-ac", "1",
"-acodec", "pcm_s16le",
"-ar", str(sr),
"-"
]
# fmt: on
try:
out = run(cmd, capture_output=True, check=True).stdout
except CalledProcessError as e:
raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e
return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0
def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):
"""
Pad or trim the audio array to N_SAMPLES, as expected by the encoder.
"""
if torch.is_tensor(array):
if array.shape[axis] > length:
array = array.index_select(
dim=axis, index=torch.arange(length, device=array.device)
)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes])
else:
if array.shape[axis] > length:
array = array.take(indices=range(length), axis=axis)
if array.shape[axis] < length:
pad_widths = [(0, 0)] * array.ndim
pad_widths[axis] = (0, length - array.shape[axis])
array = np.pad(array, pad_widths)
return array
@lru_cache(maxsize=None)
def mel_filters(device, n_mels: int = N_MELS) -> torch.Tensor:
"""
    Load the mel filterbank matrix for projecting an STFT into a Mel spectrogram.
    This allows decoupling the librosa dependency; the matrix was saved using:
np.savez_compressed(
"mel_filters.npz",
mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80),
)
"""
assert n_mels == 80, f"Unsupported n_mels: {n_mels}"
with np.load(
os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz")
) as f:
return torch.from_numpy(f[f"mel_{n_mels}"]).to(device)
def log_mel_spectrogram(
audio: Union[str, np.ndarray, torch.Tensor],
n_mels: int = N_MELS,
padding: int = 0,
device: Optional[Union[str, torch.device]] = None,
):
"""
    Compute the log-Mel spectrogram of an audio waveform
Parameters
----------
audio: Union[str, np.ndarray, torch.Tensor], shape = (*)
        The path to an audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
n_mels: int
The number of Mel-frequency filters, only 80 is supported
padding: int
Number of zero samples to pad to the right
device: Optional[Union[str, torch.device]]
If given, the audio tensor is moved to this device before STFT
Returns
-------
torch.Tensor, shape = (80, n_frames)
A Tensor that contains the Mel spectrogram
"""
if not torch.is_tensor(audio):
if isinstance(audio, str):
audio = load_audio(audio)
audio = torch.from_numpy(audio)
if device is not None:
audio = audio.to(device)
if padding > 0:
audio = F.pad(audio, (0, padding))
window = torch.hann_window(N_FFT).to(audio.device)
stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
filters = mel_filters(audio.device, n_mels)
mel_spec = filters @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec
|
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import loads
from langchain_core.load.serializable import Serializable
# Unfortunately, we have to import load from langchain_core/load/load.py
# eagerly to avoid a namespace conflict. We want users to still be able to use
# `from langchain_core.load import load` to get the load function, but
# the `from langchain_core.load.load import load` absolute import should also work.
from langchain_core.load.load import load
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"load": "load",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
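# For example, `from langchain_core.load import dumps` resolves through
# __getattr__("dumps"): the ".dump" submodule is imported lazily and the attribute
# is cached in globals(), so later lookups skip this hook entirely.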
def __dir__() -> list[str]:
return list(__all__)
|
"""**Load** module helps with serialization and deserialization."""
from importlib import import_module
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain_core.load.dump import dumpd, dumps
from langchain_core.load.load import load, loads
from langchain_core.load.serializable import Serializable
__all__ = ["dumpd", "dumps", "load", "loads", "Serializable"]
_dynamic_imports = {
"dumpd": "dump",
"dumps": "dump",
"load": "load",
"loads": "load",
"Serializable": "serializable",
}
def __getattr__(attr_name: str) -> object:
module_name = _dynamic_imports.get(attr_name)
package = __spec__.parent
if module_name == "__module__" or module_name is None:
result = import_module(f".{attr_name}", package=package)
else:
module = import_module(f".{module_name}", package=package)
result = getattr(module, attr_name)
globals()[attr_name] = result
return result
def __dir__() -> list[str]:
return list(__all__)
|
__version__ = '0.18.1'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.18.0'
import os
from docarray.document import Document
from docarray.array import DocumentArray
from docarray.dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity: Active Dimensions: 63.1, Sparsity Ratio: 0.9979
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseBinaryClassificationEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Initialize the SPLADE model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
# Load a dataset with two text columns and a class label column (https://huggingface.co/datasets/sentence-transformers/quora-duplicates)
eval_dataset = load_dataset("sentence-transformers/quora-duplicates", "pair-class", split="train[-1000:]")
# Initialize the evaluator
binary_acc_evaluator = SparseBinaryClassificationEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
labels=eval_dataset["label"],
name="quora_duplicates_dev",
show_progress_bar=True,
similarity_fn_names=["cosine", "dot", "euclidean", "manhattan"],
)
results = binary_acc_evaluator(model)
"""
Accuracy with Cosine-Similarity: 74.90 (Threshold: 0.8668)
F1 with Cosine-Similarity: 67.37 (Threshold: 0.5959)
Precision with Cosine-Similarity: 54.15
Recall with Cosine-Similarity: 89.13
Average Precision with Cosine-Similarity: 67.81
Matthews Correlation with Cosine-Similarity: 49.89
Accuracy with Dot-Product: 76.50 (Threshold: 24.3460)
F1 with Dot-Product: 66.93 (Threshold: 20.0762)
Precision with Dot-Product: 57.62
Recall with Dot-Product: 79.81
Average Precision with Dot-Product: 65.94
Matthews Correlation with Dot-Product: 48.82
Accuracy with Euclidean-Distance: 67.70 (Threshold: -10.0062)
F1 with Euclidean-Distance: 48.60 (Threshold: -0.2346)
Precision with Euclidean-Distance: 32.13
Recall with Euclidean-Distance: 99.69
Average Precision with Euclidean-Distance: 20.52
Matthews Correlation with Euclidean-Distance: -4.59
Accuracy with Manhattan-Distance: 67.70 (Threshold: -103.1993)
F1 with Manhattan-Distance: 48.60 (Threshold: -1.1565)
Precision with Manhattan-Distance: 32.13
Recall with Manhattan-Distance: 99.69
Average Precision with Manhattan-Distance: 21.05
Matthews Correlation with Manhattan-Distance: -4.59
Model Sparsity Stats: Row Non-Zero Mean: 63.13884735107422, Row Sparsity Mean: 0.9979313611984253
"""
# Print the results
print(f"Primary metric: {binary_acc_evaluator.primary_metric}")
# => Primary metric: quora_duplicates_dev_max_ap
print(f"Primary metric value: {results[binary_acc_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.6781
|
import os
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from docarray.typing.url.mimetypes import (
OBJ_MIMETYPE,
AUDIO_MIMETYPE,
VIDEO_MIMETYPE,
IMAGE_MIMETYPE,
TEXT_MIMETYPE,
)
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
@pytest.mark.parametrize(
'file_type, file_source',
[
(OBJ_MIMETYPE, MESH_FILES['obj']),
(OBJ_MIMETYPE, MESH_FILES['glb']),
(OBJ_MIMETYPE, MESH_FILES['ply']),
(OBJ_MIMETYPE, REMOTE_OBJ_FILE),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.aac')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.mp3')),
(AUDIO_MIMETYPE, os.path.join(TOYDATA_DIR, 'hello.ogg')),
(VIDEO_MIMETYPE, os.path.join(TOYDATA_DIR, 'mov_bbb.mp4')),
(IMAGE_MIMETYPE, os.path.join(TOYDATA_DIR, 'test.png')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.html')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'test' 'test.md')),
(TEXT_MIMETYPE, os.path.join(TOYDATA_DIR, 'penal_colony.txt')),
],
)
def test_file_validation(file_type, file_source):
if file_type != Mesh3DUrl.mime_type():
with pytest.raises(ValueError):
parse_obj_as(Mesh3DUrl, file_source)
else:
parse_obj_as(Mesh3DUrl, file_source)
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import Mesh3DUrl, NdArray
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
url = parse_obj_as(Mesh3DUrl, file_path)
tensors = url.load()
assert isinstance(tensors.vertices, np.ndarray)
assert isinstance(tensors.vertices, NdArray)
assert isinstance(tensors.faces, np.ndarray)
assert isinstance(tensors.faces, NdArray)
assert tensors.vertices.shape[1] == 3
assert tensors.faces.shape[1] == 3
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_path',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
@pytest.mark.parametrize('field', ['vertices', 'faces'])
def test_load_one_of_fields(file_path, field):
url = parse_obj_as(Mesh3DUrl, file_path)
field = getattr(url.load(), field)
assert isinstance(field, np.ndarray)
assert isinstance(field, NdArray)
def test_json_schema():
schema_json_of(Mesh3DUrl)
def test_dump_json():
url = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*MESH_FILES.values(), REMOTE_OBJ_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(Mesh3DUrl, path_to_file)
assert isinstance(url, Mesh3DUrl)
assert isinstance(url, str)
@pytest.mark.proto
def test_proto_mesh_url():
uri = parse_obj_as(Mesh3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data
|
import csv
from contextlib import nullcontext
from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence
import numpy as np
if TYPE_CHECKING: # pragma: no cover
from docarray.typing import T
class CsvIOMixin:
"""CSV IO helper.
    Can be applied to both DA & DAM.
"""
def save_embeddings_csv(
self, file: Union[str, TextIO], encoding: str = 'utf-8', **kwargs
) -> None:
"""Save embeddings to a CSV file
        This function utilizes :meth:`numpy.savetxt` internally.
:param file: File or filename to which the data is saved.
:param encoding: encoding used to save the data into a file. By default, ``utf-8`` is used.
:param kwargs: extra kwargs will be passed to :meth:`numpy.savetxt`.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx:
np.savetxt(file_ctx, self.embeddings, **kwargs)
def save_csv(
self,
file: Union[str, TextIO],
flatten_tags: bool = True,
exclude_fields: Optional[Sequence[str]] = None,
dialect: Union[str, 'csv.Dialect'] = 'excel',
with_header: bool = True,
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a CSV file.
:param file: File or filename to which the data is saved.
:param flatten_tags: if set, then all fields in ``Document.tags`` will be flattened into ``tag__fieldname`` and
stored as separated columns. It is useful when ``tags`` contain a lot of information.
        :param exclude_fields: if set, those fields won't show up in the output CSV
        :param dialect: defines a set of parameters specific to a particular CSV dialect. It can be a string that names a
            dialect predefined in your system, or a :class:`csv.Dialect` class that groups specific formatting
            parameters together.
:param encoding: encoding used to save the data into a CSV file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
if flatten_tags and self[0].tags:
keys = list(self[0].non_empty_fields) + list(
f'tag__{k}' for k in self[0].tags
)
keys.remove('tags')
else:
flatten_tags = False
keys = list(self[0].non_empty_fields)
if exclude_fields:
for k in exclude_fields:
if k in keys:
keys.remove(k)
writer = csv.DictWriter(fp, fieldnames=keys, dialect=dialect)
if with_header:
writer.writeheader()
for d in self:
pd = d.to_dict(
protocol='jsonschema',
exclude=set(exclude_fields) if exclude_fields else None,
exclude_none=True,
)
if flatten_tags:
t = pd.pop('tags')
pd.update({f'tag__{k}': v for k, v in t.items()})
writer.writerow(pd)
@classmethod
def load_csv(
cls: Type['T'],
file: Union[str, TextIO],
field_resolver: Optional[Dict[str, str]] = None,
encoding: str = 'utf-8',
) -> 'T':
"""Load array elements from a binary file.
:param file: File or filename to which the data is saved.
        :param field_resolver: a map from field names defined in the CSV header to the field
            names defined in Document.
:param encoding: encoding used to read a CSV file. By default, ``utf-8`` is used.
:return: a DocumentArray object
"""
from docarray.document.generators import from_csv
return cls(from_csv(file, field_resolver=field_resolver, encoding=encoding))
|
import csv
from contextlib import nullcontext
from typing import Union, TextIO, Optional, Dict, TYPE_CHECKING, Type, Sequence
import numpy as np
if TYPE_CHECKING:
from docarray.typing import T
class CsvIOMixin:
"""CSV IO helper.
    Can be applied to both DA & DAM.
"""
def save_embeddings_csv(
self, file: Union[str, TextIO], encoding: str = 'utf-8', **kwargs
) -> None:
"""Save embeddings to a CSV file
        This function utilizes :meth:`numpy.savetxt` internally.
:param file: File or filename to which the data is saved.
:param encoding: encoding used to save the data into a file. By default, ``utf-8`` is used.
:param kwargs: extra kwargs will be passed to :meth:`numpy.savetxt`.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx:
np.savetxt(file_ctx, self.embeddings, **kwargs)
def save_csv(
self,
file: Union[str, TextIO],
flatten_tags: bool = True,
exclude_fields: Optional[Sequence[str]] = None,
dialect: Union[str, 'csv.Dialect'] = 'excel',
with_header: bool = True,
encoding: str = 'utf-8',
) -> None:
"""Save array elements into a CSV file.
:param file: File or filename to which the data is saved.
:param flatten_tags: if set, then all fields in ``Document.tags`` will be flattened into ``tag__fieldname`` and
stored as separated columns. It is useful when ``tags`` contain a lot of information.
        :param exclude_fields: if set, those fields won't show up in the output CSV
        :param dialect: defines a set of parameters specific to a particular CSV dialect. It can be a string that names a
            dialect predefined in your system, or a :class:`csv.Dialect` class that groups specific formatting
            parameters together.
:param encoding: encoding used to save the data into a CSV file. By default, ``utf-8`` is used.
"""
if hasattr(file, 'write'):
file_ctx = nullcontext(file)
else:
file_ctx = open(file, 'w', encoding=encoding)
with file_ctx as fp:
if flatten_tags and self[0].tags:
keys = list(self[0].non_empty_fields) + list(
f'tag__{k}' for k in self[0].tags
)
keys.remove('tags')
else:
flatten_tags = False
keys = list(self[0].non_empty_fields)
if exclude_fields:
for k in exclude_fields:
if k in keys:
keys.remove(k)
writer = csv.DictWriter(fp, fieldnames=keys, dialect=dialect)
if with_header:
writer.writeheader()
for d in self:
pd = d.to_dict(
protocol='jsonschema',
exclude=set(exclude_fields) if exclude_fields else None,
exclude_none=True,
)
if flatten_tags:
t = pd.pop('tags')
pd.update({f'tag__{k}': v for k, v in t.items()})
writer.writerow(pd)
@classmethod
def load_csv(
cls: Type['T'],
file: Union[str, TextIO],
field_resolver: Optional[Dict[str, str]] = None,
encoding: str = 'utf-8',
) -> 'T':
"""Load array elements from a binary file.
:param file: File or filename to which the data is saved.
        :param field_resolver: a map from field names defined in the CSV header to the field
            names defined in Document.
:param encoding: encoding used to read a CSV file. By default, ``utf-8`` is used.
:return: a DocumentArray object
"""
from docarray.document.generators import from_csv
return cls(from_csv(file, field_resolver=field_resolver, encoding=encoding))
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
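    # mock_encode examples: "What is a car?" -> [0, 1, 0, 0, 0];
    # "A car is a vehicle" -> [0, 1, 1, 0, 0] (contains both "car" and "vehicle").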
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.encode_query.side_effect = mock_encode
model.encode_document.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
from __future__ import annotations
from pathlib import Path
from unittest.mock import Mock, PropertyMock
import pytest
import torch
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import InformationRetrievalEvaluator
from sentence_transformers.util import cos_sim
@pytest.fixture
def mock_model():
def mock_encode(sentences: str | list[str], **kwargs) -> torch.Tensor:
"""
We simply one-hot encode the sentences; if a sentence contains a keyword, the corresponding one-hot
encoding is added to the sentence embedding.
"""
one_hot_encodings = {
"pokemon": torch.tensor([1.0, 0.0, 0.0, 0.0, 0.0]),
"car": torch.tensor([0.0, 1.0, 0.0, 0.0, 0.0]),
"vehicle": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
"fruit": torch.tensor([0.0, 0.0, 0.0, 1.0, 0.0]),
"vegetable": torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0]),
}
if isinstance(sentences, str):
sentences = [sentences]
embeddings = []
for sentence in sentences:
encoding = torch.zeros(5)
for keyword, one_hot in one_hot_encodings.items():
if keyword in sentence:
encoding += one_hot
embeddings.append(encoding)
return torch.stack(embeddings)
model = Mock(spec=SentenceTransformer)
model.similarity_fn_name = "cosine"
model.similarity.side_effect = cos_sim
model.encode.side_effect = mock_encode
model.model_card_data = PropertyMock(return_value=Mock())
return model
@pytest.fixture
def test_data():
queries = {
"0": "What is a pokemon?",
"1": "What is a vegetable?",
"2": "What is a fruit?",
"3": "What is a vehicle?",
"4": "What is a car?",
}
corpus = {
"0": "A pokemon is a fictional creature",
"1": "A vegetable is a plant",
"2": "A fruit is a plant",
"3": "A vehicle is a machine",
"4": "A car is a vehicle",
}
relevant_docs = {"0": {"0"}, "1": {"1"}, "2": {"2"}, "3": {"3", "4"}, "4": {"4"}}
return queries, corpus, relevant_docs
def test_simple(test_data, tmp_path: Path):
queries, corpus, relevant_docs = test_data
model = SentenceTransformer("sentence-transformers-testing/stsb-bert-tiny-safetensors")
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(model, output_path=str(tmp_path))
expected_keys = [
"test_cosine_accuracy@1",
"test_cosine_accuracy@3",
"test_cosine_precision@1",
"test_cosine_precision@3",
"test_cosine_recall@1",
"test_cosine_recall@3",
"test_cosine_ndcg@3",
"test_cosine_mrr@3",
"test_cosine_map@5",
]
assert set(results.keys()) == set(expected_keys)
def test_metrices(test_data, mock_model, tmp_path: Path):
queries, corpus, relevant_docs = test_data
ir_evaluator = InformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="test",
accuracy_at_k=[1, 3],
precision_recall_at_k=[1, 3],
mrr_at_k=[3],
ndcg_at_k=[3],
map_at_k=[5],
)
results = ir_evaluator(mock_model, output_path=str(tmp_path))
# We expect test_cosine_precision@3 to be 0.4, since 6 out of 15 (5 queries * 3) are True Positives
# We expect test_cosine_recall@1 to be 0.9; the average of 4 times a recall of 1 and once a recall of 0.5
expected_results = {
"test_cosine_accuracy@1": 1.0,
"test_cosine_accuracy@3": 1.0,
"test_cosine_precision@1": 1.0,
"test_cosine_precision@3": 0.4,
"test_cosine_recall@1": 0.9,
"test_cosine_recall@3": 1.0,
"test_cosine_ndcg@3": 1.0,
"test_cosine_mrr@3": 1.0,
"test_cosine_map@5": 1.0,
}
for key, expected_value in expected_results.items():
assert results[key] == pytest.approx(expected_value, abs=1e-9)
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
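# The first loop iteration passes the decoded corpus embeddings so the OpenSearch
# index is built; later iterations reuse corpus_index and skip re-uploading the corpus.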
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
corpus_index=corpus_index,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
"""
This script contains an example how to perform semantic search with OpenSearch.
You need OpenSearch up and running locally:
https://docs.opensearch.org/docs/latest/getting-started/quickstart/
Further, you need the Python OpenSearch Client installed: https://docs.opensearch.org/docs/latest/clients/python-low-level/, e.g.:
```
pip install opensearch-py
```
This script was created for `opensearch` v2.15.0+.
"""
import time
from datasets import load_dataset
from sentence_transformers import SparseEncoder, models
from sentence_transformers.sparse_encoder.models import IDF, MLMTransformer, SpladePooling
from sentence_transformers.sparse_encoder.search_engines import semantic_search_opensearch
# 1. Load the natural-questions dataset with 100K answers
dataset = load_dataset("sentence-transformers/natural-questions", split="train")
num_docs = 10_000
corpus = dataset["answer"][:num_docs]
print(f"Finish loading data. Corpus size: {len(corpus)}")
# 2. Come up with some queries
queries = dataset["query"][:2]
# 3. Load the model
model_id = "opensearch-project/opensearch-neural-sparse-encoding-doc-v3-distill"
doc_encoder = MLMTransformer(model_id)
asym = models.Asym(
{
"query": [
IDF.from_json(
model_id,
tokenizer=doc_encoder.tokenizer,
frozen=True,
),
],
"doc": [
doc_encoder,
SpladePooling("max", activation_function="log1p_relu"),
],
}
)
sparse_model = SparseEncoder(
modules=[asym],
similarity_fn_name="dot",
)
print("Start encoding corpus...")
start_time = time.time()
# 4. Encode the corpus
corpus_embeddings = sparse_model.encode(
[{"doc": doc} for doc in corpus], convert_to_sparse_tensor=True, batch_size=32, show_progress_bar=True
)
corpus_embeddings_decoded = sparse_model.decode(corpus_embeddings)
print(f"Corpus encoding time: {time.time() - start_time:.6f} seconds")
corpus_index = None
while True:
# 5. Encode the queries using inference-free mode
start_time = time.time()
query_embeddings = sparse_model.encode([{"query": query} for query in queries], convert_to_sparse_tensor=True)
query_embeddings_decoded = sparse_model.decode(query_embeddings)
print(f"Query encoding time: {time.time() - start_time:.6f} seconds")
# 6. Perform semantic search using OpenSearch
results, search_time, corpus_index = semantic_search_opensearch(
query_embeddings_decoded,
corpus_index=corpus_index,
corpus_embeddings_decoded=corpus_embeddings_decoded if corpus_index is None else None,
top_k=5,
output_index=True,
)
# 7. Output the results
print(f"Search time: {search_time:.6f} seconds")
for query, result in zip(queries, results):
print(f"Query: {query}")
for entry in result:
print(f"(Score: {entry['score']:.4f}) {corpus[entry['corpus_id']]}, corpus_id: {entry['corpus_id']}")
print("")
# 8. Prompt for more queries
queries = [input("Please enter a question: ")]
|
_base_ = [
'../_base_/models/faster-rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
_base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_softmax(self):
self.run_layer_test(
softmax.Softmax,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_softmax_correctness(self):
softmax_layer = softmax.Softmax()
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_mask(self):
softmax_layer = softmax.Softmax(axis=(1, 0))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
expected_output = np.array(
[[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
)
result = softmax_layer(input, mask=mask)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_axis(self):
softmax_layer = softmax.Softmax(axis=(1))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
|
import numpy as np
import pytest
from keras.src import testing
from keras.src.layers.activations import softmax
class SoftmaxTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_softmax(self):
self.run_layer_test(
softmax.Softmax,
init_kwargs={},
input_shape=(2, 3, 4),
supports_masking=True,
)
def test_softmax_correctness(self):
softmax_layer = softmax.Softmax()
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_mask(self):
softmax_layer = softmax.Softmax(axis=(1, 0))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
mask = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
expected_output = np.array(
[[0.21194154, 0.0, 0.21194154], [0.0, 0.57611686, 0.0]]
)
result = softmax_layer(input, mask=mask)
self.assertAllClose(result, expected_output)
def test_softmax_correctness_with_axis(self):
softmax_layer = softmax.Softmax(axis=(1))
input = np.array([[1.0, 2.0, 1.0], [1.0, 2.0, 1.0]])
expected_output = np.array(
[
[0.21194157, 0.5761169, 0.21194157],
[0.21194157, 0.5761169, 0.21194157],
]
)
result = softmax_layer(input)
self.assertAllClose(result, expected_output)
|
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
index = ElasticV7DocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
index.index(index_docs)
query = 'world'
docs, _ = index.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
index.index(doc)
id_ = doc[0].id
assert index[id_].id == id_
assert index[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = index.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
index.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
index = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
index.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
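    # The 'should' clause matches doc[0] (its expected_attendees range contains 15)
    # and doc[1] (its time_frame contains the queried 2023-02 date range).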
docs, _ = index.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
|
import pytest
from pydantic import Field
from docarray import BaseDoc
from docarray.index import ElasticV7DocIndex
from tests.index.elastic.fixture import start_storage_v7 # noqa: F401
pytestmark = [pytest.mark.slow, pytest.mark.index]
def test_column_config():
class MyDoc(BaseDoc):
text: str
color: str = Field(col_type='keyword')
store = ElasticV7DocIndex[MyDoc]()
index_docs = [
MyDoc(id='0', text='hello world', color='red'),
MyDoc(id='1', text='never gonna give you up', color='blue'),
MyDoc(id='2', text='we are the world', color='green'),
]
store.index(index_docs)
query = 'world'
docs, _ = store.text_search(query, search_field='text')
assert [doc.id for doc in docs] == ['0', '2']
filter_query = {'terms': {'color': ['red', 'blue']}}
docs = store.filter(filter_query)
assert [doc.id for doc in docs] == ['0', '1']
def test_field_object():
class MyDoc(BaseDoc):
manager: dict = Field(
properties={
'age': {'type': 'integer'},
'name': {
'properties': {
'first': {'type': 'keyword'},
'last': {'type': 'keyword'},
}
},
}
)
store = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(manager={'age': 25, 'name': {'first': 'Rachel', 'last': 'Green'}}),
MyDoc(manager={'age': 30, 'name': {'first': 'Monica', 'last': 'Geller'}}),
MyDoc(manager={'age': 35, 'name': {'first': 'Phoebe', 'last': 'Buffay'}}),
]
store.index(doc)
id_ = doc[0].id
assert store[id_].id == id_
assert store[id_].manager == doc[0].manager
filter_query = {'range': {'manager.age': {'gte': 30}}}
docs = store.filter(filter_query)
assert [doc.id for doc in docs] == [doc[1].id, doc[2].id]
def test_field_geo_point():
class MyDoc(BaseDoc):
location: dict = Field(col_type='geo_point')
store = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(location={'lat': 40.12, 'lon': -72.34}),
MyDoc(location={'lat': 41.12, 'lon': -73.34}),
MyDoc(location={'lat': 42.12, 'lon': -74.34}),
]
store.index(doc)
query = {
'query': {
'geo_bounding_box': {
'location': {
'top_left': {'lat': 42, 'lon': -74},
'bottom_right': {'lat': 40, 'lon': -72},
}
}
},
}
docs, _ = store.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
def test_field_range():
class MyDoc(BaseDoc):
expected_attendees: dict = Field(col_type='integer_range')
time_frame: dict = Field(col_type='date_range', format='yyyy-MM-dd')
store = ElasticV7DocIndex[MyDoc]()
doc = [
MyDoc(
expected_attendees={'gte': 10, 'lt': 20},
time_frame={'gte': '2023-01-01', 'lt': '2023-02-01'},
),
MyDoc(
expected_attendees={'gte': 20, 'lt': 30},
time_frame={'gte': '2023-02-01', 'lt': '2023-03-01'},
),
MyDoc(
expected_attendees={'gte': 30, 'lt': 40},
time_frame={'gte': '2023-03-01', 'lt': '2023-04-01'},
),
]
store.index(doc)
query = {
'query': {
'bool': {
'should': [
{'term': {'expected_attendees': {'value': 15}}},
{
'range': {
'time_frame': {
'gte': '2023-02-05',
'lt': '2023-02-10',
'relation': 'contains',
}
}
},
]
}
},
}
docs, _ = store.execute_query(query)
assert [doc['id'] for doc in docs] == [doc[0].id, doc[1].id]
|
import logging
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
logging.basicConfig(format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set(relevant_cids))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=16,
)
# Run evaluation
results = ir_evaluator(model)
# Print the results
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
|
import random
from datasets import load_dataset
from sentence_transformers.sparse_encoder import (
MLMTransformer,
SparseEncoder,
SparseInformationRetrievalEvaluator,
SpladePooling,
)
# Initialize the SPLADE model
model_name = "naver/splade-cocondenser-ensembledistil"
model = SparseEncoder(
modules=[
MLMTransformer(model_name),
SpladePooling(pooling_strategy="max"), # You can also use 'sum'
],
device="cuda:0",
)
# Load the Touche-2020 IR dataset (https://huggingface.co/datasets/BeIR/webis-touche2020, https://huggingface.co/datasets/BeIR/webis-touche2020-qrels)
corpus = load_dataset("BeIR/webis-touche2020", "corpus", split="corpus")
queries = load_dataset("BeIR/webis-touche2020", "queries", split="queries")
relevant_docs_data = load_dataset("BeIR/webis-touche2020-qrels", split="test")
# For this dataset, we want to concatenate the title and texts for the corpus
corpus = corpus.map(lambda x: {"text": x["title"] + " " + x["text"]}, remove_columns=["title"])
# Shrink the corpus size heavily to only the relevant documents + 30,000 random documents
required_corpus_ids = set(map(str, relevant_docs_data["corpus-id"]))
required_corpus_ids |= set(random.sample(corpus["_id"], k=30_000))
corpus = corpus.filter(lambda x: x["_id"] in required_corpus_ids)
# Convert the datasets to dictionaries
corpus = dict(zip(corpus["_id"], corpus["text"])) # Our corpus (cid => document)
queries = dict(zip(queries["_id"], queries["text"])) # Our queries (qid => question)
relevant_docs = {}  # Query ID to relevant documents (qid => set(relevant_cids))
for qid, corpus_ids in zip(relevant_docs_data["query-id"], relevant_docs_data["corpus-id"]):
qid = str(qid)
corpus_ids = str(corpus_ids)
if qid not in relevant_docs:
relevant_docs[qid] = set()
relevant_docs[qid].add(corpus_ids)
# Given queries, a corpus and a mapping with relevant documents, the SparseInformationRetrievalEvaluator computes different IR metrics.
ir_evaluator = SparseInformationRetrievalEvaluator(
queries=queries,
corpus=corpus,
relevant_docs=relevant_docs,
name="BeIR-touche2020-subset-test",
show_progress_bar=True,
batch_size=32,
)
# Run evaluation
print("Starting evaluation ")
results = ir_evaluator(model)
print(f"Primary metric: {ir_evaluator.primary_metric}")
print(f"Primary metric value: {results[ir_evaluator.primary_metric]:.4f}")
# Print results for each dataset
for key, value in results.items():
if key.startswith("Nano"):
print(f"{key}: {value:.4f}")
|
import warnings
from typing import Optional, Tuple, TypeVar
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
URL to an audio file.
    Can be a remote (web) URL or a local file path.
"""
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an AudioNdArray and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an AudioNdArray representing the Audio file content,
and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to AudioBytes. This will either load or download the file and save
it into an AudioBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: AudioBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
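# --- Illustrative usage sketch (not part of the class above) ---
# A minimal, hedged example of how AudioUrl is typically exercised through a
# BaseDoc field; the sample URL is the same public file referenced in the
# docstring and is assumed to be reachable (network access required).
if __name__ == "__main__":
    from docarray import BaseDoc

    class _AudioDemoDoc(BaseDoc):
        url: AudioUrl

    doc = _AudioDemoDoc(url='https://www.kozco.com/tech/piano2.wav')
    audio_bytes = doc.url.load_bytes()       # raw file content as AudioBytes
    tensor, frame_rate = audio_bytes.load()  # decode to AudioNdArray + frame rate
    print(tensor.shape, frame_rate)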
|
import warnings
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type, TypeVar, Union
from docarray.typing import AudioNdArray
from docarray.typing.bytes.audio_bytes import AudioBytes
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.filetypes import AUDIO_FILE_FORMATS
from docarray.utils._internal.misc import is_notebook
if TYPE_CHECKING:
from pydantic import BaseConfig
from pydantic.fields import ModelField
T = TypeVar('T', bound='AudioUrl')
@_register_proto(proto_type_name='audio_url')
class AudioUrl(AnyUrl):
"""
    URL to an audio file.
    Can be a remote (web) URL or a local file path.
"""
@classmethod
def validate(
cls: Type[T],
value: Union[T, str, Any],
field: 'ModelField',
config: 'BaseConfig',
) -> T:
import os
from urllib.parse import urlparse
url = super().validate(value, field, config) # basic url validation
path = urlparse(url).path
ext = os.path.splitext(path)[1][1:].lower()
# pass test if extension is valid or no extension
has_audio_extension = ext in AUDIO_FILE_FORMATS or ext == ''
if not has_audio_extension:
raise ValueError('Audio URL must have a valid extension')
return cls(str(url), scheme=None)
def load(self: T) -> Tuple[AudioNdArray, int]:
"""
Load the data from the url into an AudioNdArray and the frame rate.
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import AudioNdArray, AudioUrl
class MyDoc(BaseDoc):
audio_url: AudioUrl
audio_tensor: Optional[AudioNdArray]
doc = MyDoc(audio_url='https://www.kozco.com/tech/piano2.wav')
doc.audio_tensor, _ = doc.audio_url.load()
assert isinstance(doc.audio_tensor, AudioNdArray)
```
---
:return: tuple of an AudioNdArray representing the Audio file content,
and an integer representing the frame rate.
"""
bytes_ = self.load_bytes()
return bytes_.load()
def load_bytes(self, timeout: Optional[float] = None) -> AudioBytes:
"""
Convert url to AudioBytes. This will either load or download the file and save
it into an AudioBytes object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: AudioBytes object
"""
bytes_ = super().load_bytes(timeout=timeout)
return AudioBytes(bytes_)
def display(self):
"""
Play the audio sound from url in notebook.
"""
if is_notebook():
from IPython.display import Audio, display
            remote_url = self.startswith('http')
if remote_url:
display(Audio(data=self))
else:
display(Audio(filename=self))
else:
            warnings.warn('Display of audio is only possible in a notebook.')
|
from datetime import datetime, timedelta
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that is used as the datetime format.
Update this to match the desired datetime format for your application.
"""
def get_format_instructions(self) -> str:
"""Returns the format instructions for the given format."""
if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
examples = comma_list(
[
"2023-07-04T14:30:00.000000Z",
"1999-12-31T23:59:59.999999Z",
"2025-01-01T00:00:00.000000Z",
]
)
else:
try:
now = datetime.now()
examples = comma_list(
[
now.strftime(self.format),
(now.replace(year=now.year - 1)).strftime(self.format),
(now - timedelta(days=1)).strftime(self.format),
]
)
except ValueError:
# Fallback if the format is very unusual
examples = f"e.g., a valid string in the format {self.format}"
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
"""Parse a string into a datetime object."""
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
msg = f"Could not parse datetime string: {response}"
raise OutputParserException(msg) from e
@property
def _type(self) -> str:
return "datetime"
|
from datetime import datetime, timedelta
from langchain_core.exceptions import OutputParserException
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.utils import comma_list
class DatetimeOutputParser(BaseOutputParser[datetime]):
"""Parse the output of an LLM call to a datetime."""
format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
"""The string value that is used as the datetime format.
Update this to match the desired datetime format for your application.
"""
def get_format_instructions(self) -> str:
"""Returns the format instructions for the given format."""
if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
examples = comma_list(
[
"2023-07-04T14:30:00.000000Z",
"1999-12-31T23:59:59.999999Z",
"2025-01-01T00:00:00.000000Z",
]
)
else:
try:
now = datetime.now()
examples = comma_list(
[
now.strftime(self.format),
(now.replace(year=now.year - 1)).strftime(self.format),
(now - timedelta(days=1)).strftime(self.format),
]
)
except ValueError:
# Fallback if the format is very unusual
examples = f"e.g., a valid string in the format {self.format}"
return (
f"Write a datetime string that matches the "
f"following pattern: '{self.format}'.\n\n"
f"Examples: {examples}\n\n"
f"Return ONLY this string, no other words!"
)
def parse(self, response: str) -> datetime:
"""Parse a string into a datetime object."""
try:
return datetime.strptime(response.strip(), self.format)
except ValueError as e:
raise OutputParserException(
f"Could not parse datetime string: {response}"
) from e
@property
def _type(self) -> str:
return "datetime"
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import ConfigType, OptConfigType, OptMultiConfig
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone: ConfigType,
neck: ConfigType,
bbox_head: ConfigType,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / 'VERSION.txt').read_text(encoding='utf-8').strip().replace('rc', '-rc')
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<license type="expression">MIT</license>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding='utf-8')
|
# coding: utf-8
"""Script for generating files with NuGet package metadata."""
import datetime
import sys
from pathlib import Path
from shutil import copyfile
if __name__ == "__main__":
source = Path(sys.argv[1])
current_dir = Path(__file__).absolute().parent
linux_folder_path = current_dir / "runtimes" / "linux-x64" / "native"
linux_folder_path.mkdir(parents=True, exist_ok=True)
osx_folder_path = current_dir / "runtimes" / "osx-x64" / "native"
osx_folder_path.mkdir(parents=True, exist_ok=True)
windows_folder_path = current_dir / "runtimes" / "win-x64" / "native"
windows_folder_path.mkdir(parents=True, exist_ok=True)
build_folder_path = current_dir / "build"
build_folder_path.mkdir(parents=True, exist_ok=True)
copyfile(source / "lib_lightgbm.so", linux_folder_path / "lib_lightgbm.so")
copyfile(source / "lib_lightgbm.dylib", osx_folder_path / "lib_lightgbm.dylib")
copyfile(source / "lib_lightgbm.dll", windows_folder_path / "lib_lightgbm.dll")
copyfile(source / "lightgbm.exe", windows_folder_path / "lightgbm.exe")
version = (current_dir.parent / 'VERSION.txt').read_text(encoding='utf-8').strip().replace('rc', '-rc')
nuget_str = rf"""<?xml version="1.0"?>
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>LightGBM</id>
<version>{version}</version>
<authors>Guolin Ke</authors>
<owners>Guolin Ke</owners>
<licenseUrl>https://github.com/microsoft/LightGBM/blob/master/LICENSE</licenseUrl>
<projectUrl>https://github.com/microsoft/LightGBM</projectUrl>
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A fast, distributed, high performance gradient boosting framework</description>
<copyright>Copyright {datetime.datetime.now().year} @ Microsoft</copyright>
<tags>machine-learning data-mining distributed native boosting gbdt</tags>
<dependencies> </dependencies>
</metadata>
<files>
<file src="build\**" target="build"/>
<file src="runtimes\**" target="runtimes"/>
</files>
</package>
"""
prop_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Condition="Exists('packages.config') OR
Exists('$(MSBuildProjectName).packages.config') OR
Exists('packages.$(MSBuildProjectName).config')">
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.dll"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
<Content Include="$(MSBuildThisFileDirectory)/../runtimes/win-x64/native/*.exe"
Condition="'$(PlatformTarget)' == 'x64'">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
<Visible>false</Visible>
</Content>
</ItemGroup>
</Project>
"""
target_str = r"""
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
</PropertyGroup>
<Target Name="_LightGBMCheckForUnsupportedPlatformTarget"
Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == 'true'"
AfterTargets="_CheckForInvalidConfigurationAndPlatform">
<Error Condition="'$(PlatformTarget)' != 'x64' AND
('$(OutputType)' == 'Exe' OR '$(OutputType)'=='WinExe') AND
!('$(TargetFrameworkIdentifier)' == '.NETCoreApp' AND '$(PlatformTarget)' == '')"
Text="LightGBM currently supports 'x64' processor architectures. Please ensure your application is targeting 'x64'." />
</Target>
</Project>
"""
(current_dir / "LightGBM.nuspec").write_text(nuget_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.props").write_text(prop_str, encoding='utf-8')
(current_dir / "build" / "LightGBM.targets").write_text(target_str, encoding='utf-8')
|
"""Module to test base parser implementations."""
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase()
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
"""Module to test base parser implementations."""
from typing_extensions import override
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import (
BaseGenerationOutputParser,
BaseTransformOutputParser,
)
from langchain_core.outputs import ChatGeneration, Generation
def test_base_generation_parser() -> None:
"""Test Base Generation Output Parser."""
class StrInvertCase(BaseGenerationOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
StrInvertCase.model_rebuild()
model = GenericFakeChatModel(messages=iter([AIMessage(content="hEllo")]))
chain = model | StrInvertCase()
assert chain.invoke("") == "HeLLO"
def test_base_transform_output_parser() -> None:
"""Test base transform output parser."""
class StrInvertCase(BaseTransformOutputParser[str]):
"""An example parser that inverts the case of the characters in the message."""
def parse(self, text: str) -> str:
"""Parse a single string into a specific format."""
raise NotImplementedError
@override
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> str:
"""Parse a list of model Generations into a specific format.
Args:
result: A list of Generations to be parsed. The Generations are assumed
to be different candidate outputs for a single model input.
                    Many parsers assume that only a single generation is passed in.
                    We will assert that here.
partial: Whether to allow partial results. This is used for parsers
that support streaming
"""
if len(result) != 1:
msg = "This output parser can only be used with a single generation."
raise NotImplementedError(msg)
generation = result[0]
if not isinstance(generation, ChatGeneration):
# Say that this one only works with chat generations
msg = "This output parser can only be used with a chat generation."
raise OutputParserException(msg)
content = generation.message.content
assert isinstance(content, str)
return content.swapcase() # type: ignore
model = GenericFakeChatModel(messages=iter([AIMessage(content="hello world")]))
chain = model | StrInvertCase()
# inputs to models are ignored, response is hard-coded in model definition
chunks = list(chain.stream(""))
assert chunks == ["HELLO", " ", "WORLD"]
|
"""Interface for tools."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool):
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
        available_tool_names_str = ", ".join(available_tool_names)
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
        available_tool_names_str = ", ".join(available_tool_names)
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
"""Interface for tools."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, tool
class InvalidTool(BaseTool): # type: ignore[override]
"""Tool that is run when invalid tool name is encountered by agent."""
name: str = "invalid_tool"
"""Name of the tool."""
description: str = "Called when tool name is invalid. Suggests valid tool names."
"""Description of the tool."""
def _run(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
        available_tool_names_str = ", ".join(available_tool_names)
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
async def _arun(
self,
requested_tool_name: str,
available_tool_names: list[str],
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
"""Use the tool asynchronously."""
        available_tool_names_str = ", ".join(available_tool_names)
return (
f"{requested_tool_name} is not a valid tool, "
f"try one of [{available_tool_names_str}]."
)
__all__ = ["InvalidTool", "tool"]
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, List, Optional, Tuple, Type, Union
import cv2
import matplotlib
import numpy as np
import torch
def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
"""If the type of value is torch.Tensor, convert the value to np.ndarray.
Args:
value (np.ndarray, torch.Tensor): value.
Returns:
Any: value.
"""
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
return value
def value2list(value: Any, valid_type: Union[Type, Tuple[Type, ...]],
expand_dim: int) -> List[Any]:
"""If the type of ``value`` is ``valid_type``, convert the value to list
and expand to ``expand_dim``.
Args:
value (Any): value.
valid_type (Union[Type, Tuple[Type, ...]): valid type.
expand_dim (int): expand dim.
Returns:
List[Any]: value.
"""
if isinstance(value, valid_type):
value = [value] * expand_dim
return value
def check_type(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]]) -> None:
"""Check whether the type of value is in ``valid_type``.
Args:
name (str): value name.
value (Any): value.
valid_type (Type, Tuple[Type, ...]): expected type.
"""
if not isinstance(value, valid_type):
raise TypeError(f'`{name}` should be {valid_type} '
f' but got {type(value)}')
def check_length(name: str, value: Any, valid_length: int) -> None:
"""If type of the ``value`` is list, check whether its length is equal with
or greater than ``valid_length``.
Args:
name (str): value name.
value (Any): value.
valid_length (int): expected length.
"""
if isinstance(value, list):
if len(value) < valid_length:
raise AssertionError(
                f'The length of {name} must be equal to or '
                f'greater than {valid_length}, but got {len(value)}')
def check_type_and_length(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]],
valid_length: int) -> None:
"""Check whether the type of value is in ``valid_type``. If type of the
    ``value`` is list, check whether its length is equal to or greater than
``valid_length``.
Args:
        name (str): value name.
        value (Any): value.
        valid_type (Type, Tuple[Type, ...]): expected type.
        valid_length (int): expected length.
"""
check_type(name, value, valid_type)
check_length(name, value, valid_length)
def color_val_matplotlib(colors):
"""Convert various input in RGB order to normalized RGB matplotlib color
tuples,
Args:
color (:obj:`mmcv.Color`/str/tuple/int/ndarray): Color inputs
Returns:
tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
"""
if isinstance(colors, str):
return colors
elif isinstance(colors, tuple):
assert len(colors) == 3
for channel in colors:
assert 0 <= channel <= 255
colors = [channel / 255 for channel in colors]
return tuple(colors)
elif isinstance(colors, list):
colors = [color_val_matplotlib(color) for color in colors]
return colors
else:
raise TypeError(f'Invalid type for color: {type(colors)}')
def str_color_to_rgb(color):
color = matplotlib.colors.to_rgb(color)
color = tuple([int(c * 255) for c in color])
return color
def convert_overlay_heatmap(feat_map: Union[np.ndarray, torch.Tensor],
img: Optional[np.ndarray] = None,
alpha: float = 0.5) -> np.ndarray:
"""Convert feat_map to heatmap and overlay on image, if image is not None.
Args:
feat_map (np.ndarray, torch.Tensor): The feat_map to convert
with of shape (H, W), where H is the image height and W is
the image width.
img (np.ndarray, optional): The origin image. The format
should be RGB. Defaults to None.
alpha (float): The transparency of origin image. Defaults to 0.5.
Returns:
np.ndarray: heatmap
"""
if isinstance(feat_map, torch.Tensor):
feat_map = feat_map.detach().cpu().numpy()
norm_img = np.zeros(feat_map.shape)
norm_img = cv2.normalize(feat_map, norm_img, 0, 255, cv2.NORM_MINMAX)
norm_img = np.asarray(norm_img, dtype=np.uint8)
heat_img = cv2.applyColorMap(norm_img, cv2.COLORMAP_JET)
heat_img = cv2.cvtColor(heat_img, cv2.COLOR_BGR2RGB)
if img is not None:
heat_img = cv2.addWeighted(img, alpha, heat_img, 1 - alpha, 0)
return heat_img
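# --- Illustrative usage sketch (not part of the utilities above) ---
# Overlay a random feature map on a random RGB image; the shapes below are
# arbitrary demo values.
if __name__ == "__main__":
    feat = torch.rand(64, 64)
    image = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    overlay = convert_overlay_heatmap(feat, image, alpha=0.5)
    print(overlay.shape, overlay.dtype)  # (64, 64, 3) uint8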
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, List, Tuple, Type, Union
import numpy as np
import torch
def tensor2ndarray(value: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
"""If the type of value is torch.Tensor, convert the value to np.ndarray.
Args:
value (np.ndarray, torch.Tensor): value.
Returns:
Any: value.
"""
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
return value
def value2list(value: Any, valid_type: Union[Type, Tuple[Type, ...]],
expand_dim: int) -> List[Any]:
"""If the type of ``value`` is ``valid_type``, convert the value to list
and expand to ``expand_dim``.
Args:
value (Any): value.
valid_type (Union[Type, Tuple[Type, ...]): valid type.
expand_dim (int): expand dim.
Returns:
List[Any]: value.
"""
if isinstance(value, valid_type):
value = [value] * expand_dim
return value
def check_type(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]]) -> None:
"""Check whether the type of value is in ``valid_type``.
Args:
name (str): value name.
value (Any): value.
valid_type (Type, Tuple[Type, ...]): expected type.
"""
if not isinstance(value, valid_type):
raise TypeError(f'`{name}` should be {valid_type} '
f' but got {type(value)}')
def check_length(name: str, value: Any, valid_length: int) -> None:
"""If type of the ``value`` is list, check whether its length is equal with
or greater than ``valid_length``.
Args:
name (str): value name.
value (Any): value.
valid_length (int): expected length.
"""
if isinstance(value, list):
if len(value) < valid_length:
raise AssertionError(
                f'The length of {name} must be equal to or '
                f'greater than {valid_length}, but got {len(value)}')
def check_type_and_length(name: str, value: Any,
valid_type: Union[Type, Tuple[Type, ...]],
valid_length: int) -> None:
"""Check whether the type of value is in ``valid_type``. If type of the
    ``value`` is list, check whether its length is equal to or greater than
``valid_length``.
Args:
        name (str): value name.
        value (Any): value.
        valid_type (Type, Tuple[Type, ...]): expected type.
        valid_length (int): expected length.
"""
check_type(name, value, valid_type)
check_length(name, value, valid_length)
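# --- Illustrative usage sketch (not part of the utilities above) ---
# Typical pattern: normalise a possibly-scalar argument to a list of the
# expected length, then validate it; the values are arbitrary demo inputs.
if __name__ == "__main__":
    sizes = value2list(3, int, expand_dim=4)  # -> [3, 3, 3, 3]
    check_type_and_length('sizes', sizes, list, valid_length=4)
    print(sizes)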
|
_base_ = './fast-rcnn_r50-caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet101_caffe')))
|
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
from langchain.agents.output_parsers.tools import (
ToolAgentAction,
parse_ai_message_to_tool_action,
)
OpenAIToolAgentAction = ToolAgentAction
def parse_ai_message_to_openai_tool_action(
message: BaseMessage,
) -> Union[list[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
tool_actions = parse_ai_message_to_tool_action(message)
if isinstance(tool_actions, AgentFinish):
return tool_actions
final_actions: list[AgentAction] = []
for action in tool_actions:
if isinstance(action, ToolAgentAction):
final_actions.append(
OpenAIToolAgentAction(
tool=action.tool,
tool_input=action.tool_input,
log=action.log,
message_log=action.message_log,
tool_call_id=action.tool_call_id,
)
)
else:
final_actions.append(action)
return final_actions
class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
Is meant to be used with OpenAI models, as it relies on the specific
tool_calls parameter from OpenAI to convey what tools to use.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-tools-agent-output-parser"
def parse_result(
self, result: list[Generation], *, partial: bool = False
) -> Union[list[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
def parse(self, text: str) -> Union[list[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
from typing import List, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGeneration, Generation
from langchain.agents.agent import MultiActionAgentOutputParser
from langchain.agents.output_parsers.tools import (
ToolAgentAction,
parse_ai_message_to_tool_action,
)
OpenAIToolAgentAction = ToolAgentAction
def parse_ai_message_to_openai_tool_action(
message: BaseMessage,
) -> Union[List[AgentAction], AgentFinish]:
"""Parse an AI message potentially containing tool_calls."""
tool_actions = parse_ai_message_to_tool_action(message)
if isinstance(tool_actions, AgentFinish):
return tool_actions
final_actions: List[AgentAction] = []
for action in tool_actions:
if isinstance(action, ToolAgentAction):
final_actions.append(
OpenAIToolAgentAction(
tool=action.tool,
tool_input=action.tool_input,
log=action.log,
message_log=action.message_log,
tool_call_id=action.tool_call_id,
)
)
else:
final_actions.append(action)
return final_actions
class OpenAIToolsAgentOutputParser(MultiActionAgentOutputParser):
"""Parses a message into agent actions/finish.
Is meant to be used with OpenAI models, as it relies on the specific
tool_calls parameter from OpenAI to convey what tools to use.
If a tool_calls parameter is passed, then that is used to get
the tool names and tool inputs.
If one is not passed, then the AIMessage is assumed to be the final output.
"""
@property
def _type(self) -> str:
return "openai-tools-agent-output-parser"
def parse_result(
self, result: List[Generation], *, partial: bool = False
) -> Union[List[AgentAction], AgentFinish]:
if not isinstance(result[0], ChatGeneration):
raise ValueError("This output parser only works on ChatGeneration output")
message = result[0].message
return parse_ai_message_to_openai_tool_action(message)
def parse(self, text: str) -> Union[List[AgentAction], AgentFinish]:
raise ValueError("Can only parse messages")
|
__version__ = '0.33.1'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
__version__ = '0.33.0'
import logging
from docarray.array import DocList, DocVec
from docarray.base_doc.doc import BaseDoc
from docarray.utils._internal.misc import _get_path_from_docarray_root_level
__all__ = ['BaseDoc', 'DocList', 'DocVec']
logger = logging.getLogger('docarray')
handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def __getattr__(name: str):
if name in ['Document', 'DocumentArray']:
raise ImportError(
f'Cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\'.\n'
f'The object named \'{name}\' does not exist anymore in this version of docarray.\n'
f'If you still want to use \'{name}\' please downgrade to version <=0.21.0 '
f'with: `pip install -U docarray==0.21.0`.'
)
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import Text
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
# or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
from docarray.documents import Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
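# --- Illustrative usage sketch (not part of the class above) ---
# Exercise the string-like behaviour defined here: construction from a plain
# string, equality against a string, and substring membership.
if __name__ == "__main__":
    doc = Text('hello world')
    print(doc == 'hello world')  # True: __eq__ compares against the `text` field
    print('hello' in doc)        # True: __contains__ delegates to the `text` field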
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.typing import TextUrl
from docarray.typing.tensor.embedding import AnyEmbedding
T = TypeVar('T', bound='Text')
class Text(BaseDocument):
"""
Document for handling text.
It can contain a TextUrl (`Text.url`), a str (`Text.text`),
and an AnyEmbedding (`Text.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Text
# use it directly
txt_doc = Text(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
You can initialize directly from a string:
.. code-block:: python
from docarray.documents import Text
txt_doc = Text('hello world')
You can extend this Document:
.. code-block:: python
from docarray.documents import Text
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyText(Text):
second_embedding: Optional[AnyEmbedding]
txt_doc = MyText(url='http://www.jina.ai/')
txt_doc.text = txt_doc.url.load()
model = MyEmbeddingModel()
txt_doc.embedding = model(txt_doc.text)
txt_doc.second_embedding = model(txt_doc.text)
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Image, Text
# compose it
class MultiModalDoc(BaseDocument):
image_doc: Image
text_doc: Text
mmdoc = MultiModalDoc(
image_doc=Image(url="http://www.jina.ai/image.jpg"),
text_doc=Text(text="hello world, how are you doing?"),
)
mmdoc.text_doc.text = mmdoc.text_doc.url.load()
        # or
mmdoc.text_doc.bytes = mmdoc.text_doc.url.load_bytes()
This Document can be compared against another Document of the same type or a string.
When compared against another object of the same type, the pydantic BaseModel
equality check will apply which checks the equality of every attribute,
including `id`. When compared against a str, it will check the equality
of the `text` attribute against the given string.
.. code-block:: python
        from docarray.documents import Text
doc = Text(text='This is the main text', url='exampleurl.com')
doc2 = Text(text='This is the main text', url='exampleurl.com')
doc == 'This is the main text' # True
doc == doc2 # False, their ids are not equivalent
"""
text: Optional[str] = None
url: Optional[TextUrl] = None
embedding: Optional[AnyEmbedding] = None
bytes: Optional[bytes] = None
def __init__(self, text: Optional[str] = None, **kwargs):
if 'text' not in kwargs:
kwargs['text'] = text
super().__init__(**kwargs)
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(text=value)
return super().validate(value)
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return self.text == other
else:
# BaseModel has a default equality
return super().__eq__(other)
def __contains__(self, item: str) -> bool:
"""
This method makes `Text` behave the same as an `str`.
.. code-block:: python
from docarray.documents import Text
t = Text(text='this is my text document')
assert 'text' in t
assert 'docarray' not in t
:param item: A string to be checked if is a substring of `text` attribute
:return: A boolean determining the presence of `item` as a substring in `text`
"""
if self.text is not None:
return self.text.__contains__(item)
else:
return False
def _get_string_for_regex_filter(self):
return self.text
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of training with Dask on CPU
====================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
from xgboost.dask import DaskDMatrix
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
rng = da.random.default_rng(1)
X = rng.normal(size=(m, n))
y = X.sum(axis=1)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = dxgb.train(
client,
{"verbosity": 1, "tree_method": "hist"},
dtrain,
num_boost_round=4,
evals=[(dtrain, "train")],
)
bst = output["booster"]
history = output["history"]
# you can pass output directly into `predict` too.
prediction = dxgb.predict(client, bst, dtrain)
print("Evaluation history:", history)
return prediction
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
|
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Pipeline,
Lumina2Transformer2DModel,
)
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Pipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer,
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
|
import unittest
import torch
from transformers import AutoTokenizer, Gemma2Config, Gemma2Model
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
Lumina2Pipeline,
Lumina2Text2ImgPipeline,
Lumina2Transformer2DModel,
)
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import PipelineTesterMixin
class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = Lumina2Pipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
supports_dduf = False
test_xformers_attention = False
test_layerwise_casting = True
def get_dummy_components(self):
torch.manual_seed(0)
transformer = Lumina2Transformer2DModel(
sample_size=4,
patch_size=2,
in_channels=4,
hidden_size=8,
num_layers=2,
num_attention_heads=1,
num_kv_heads=1,
multiple_of=16,
ffn_dim_multiplier=None,
norm_eps=1e-5,
scaling_factor=1.0,
axes_dim_rope=[4, 2, 2],
cap_feat_dim=8,
)
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma")
torch.manual_seed(0)
config = Gemma2Config(
head_dim=4,
hidden_size=8,
intermediate_size=8,
num_attention_heads=2,
num_hidden_layers=2,
num_key_value_heads=2,
sliding_window=2,
)
text_encoder = Gemma2Model(config)
components = {
"transformer": transformer,
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 32,
"width": 32,
"output_type": "np",
}
return inputs
def test_deprecation_raises_warning(self):
with self.assertWarns(FutureWarning) as warning:
_ = Lumina2Text2ImgPipeline(**self.get_dummy_components()).to(torch_device)
warning_message = str(warning.warnings[0].message)
assert "renamed to `Lumina2Pipeline`" in warning_message
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .test_time_aug import BaseTTAModel
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm', 'BaseTTAModel'
]
if digit_version(TORCH_VERSION) >= digit_version('2.0.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.utils.dl_utils import TORCH_VERSION
from mmengine.utils.version_utils import digit_version
from .averaged_model import (BaseAveragedModel, ExponentialMovingAverage,
MomentumAnnealingEMA, StochasticWeightAverage)
from .base_model import BaseDataPreprocessor, BaseModel, ImgDataPreprocessor
from .base_module import BaseModule, ModuleDict, ModuleList, Sequential
from .test_time_aug import BaseTTAModel
from .utils import (convert_sync_batchnorm, detect_anomalous_params,
merge_dict, revert_sync_batchnorm, stack_batch)
from .weight_init import (BaseInit, Caffe2XavierInit, ConstantInit,
KaimingInit, NormalInit, PretrainedInit,
TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init,
constant_init, initialize, kaiming_init, normal_init,
trunc_normal_init, uniform_init, update_init_info,
xavier_init)
from .wrappers import (MMDistributedDataParallel,
MMSeparateDistributedDataParallel, is_model_wrapper)
__all__ = [
'MMDistributedDataParallel', 'is_model_wrapper', 'BaseAveragedModel',
'StochasticWeightAverage', 'ExponentialMovingAverage',
'MomentumAnnealingEMA', 'BaseModel', 'BaseDataPreprocessor',
'ImgDataPreprocessor', 'MMSeparateDistributedDataParallel', 'BaseModule',
'stack_batch', 'merge_dict', 'detect_anomalous_params', 'ModuleList',
'ModuleDict', 'Sequential', 'revert_sync_batchnorm', 'update_init_info',
'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
'bias_init_with_prob', 'BaseInit', 'ConstantInit', 'XavierInit',
'NormalInit', 'TruncNormalInit', 'UniformInit', 'KaimingInit',
'Caffe2XavierInit', 'PretrainedInit', 'initialize',
'convert_sync_batchnorm', 'BaseTTAModel'
]
if digit_version(TORCH_VERSION) >= digit_version('1.11.0'):
from .wrappers import MMFullyShardedDataParallel # noqa:F401
__all__.append('MMFullyShardedDataParallel')
|
import itertools
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio_unittest.common_utils import get_wav_data, PytorchTestCase, skipIfNoExec, skipIfNoSox, TempDirMixin
from .common import get_enc_params, name_func
@skipIfNoExec("sox")
@skipIfNoSox
class TestRoundTripIO(TempDirMixin, PytorchTestCase):
"""save/load round trip should not degrade data for lossless formats"""
@parameterized.expand(
list(
itertools.product(
["float32", "int32", "int16", "uint8"],
[8000, 16000],
[1, 2],
)
),
name_func=name_func,
)
def test_wav(self, dtype, sample_rate, num_channels):
"""save/load round trip should not degrade data for wav formats"""
original = get_wav_data(dtype, num_channels, normalize=False)
enc, bps = get_enc_params(dtype)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.wav")
sox_io_backend.save(path, data, sample_rate, encoding=enc, bits_per_sample=bps)
data, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
self.assertEqual(original, data)
@parameterized.expand(
list(
itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)
),
name_func=name_func,
)
def test_flac(self, sample_rate, num_channels, compression_level):
"""save/load round trip should not degrade data for flac formats"""
original = get_wav_data("float32", num_channels)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.flac")
sox_io_backend.save(path, data, sample_rate, compression=compression_level)
data, sr = sox_io_backend.load(path)
assert sr == sample_rate
self.assertEqual(original, data)
|
import itertools
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio_unittest.common_utils import (
get_wav_data,
PytorchTestCase,
skipIfNoExec,
skipIfNoSox,
TempDirMixin,
)
from .common import get_enc_params, name_func
@skipIfNoExec("sox")
@skipIfNoSox
class TestRoundTripIO(TempDirMixin, PytorchTestCase):
"""save/load round trip should not degrade data for lossless formats"""
@parameterized.expand(
list(
itertools.product(
["float32", "int32", "int16", "uint8"],
[8000, 16000],
[1, 2],
)
),
name_func=name_func,
)
def test_wav(self, dtype, sample_rate, num_channels):
"""save/load round trip should not degrade data for wav formats"""
original = get_wav_data(dtype, num_channels, normalize=False)
enc, bps = get_enc_params(dtype)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.wav")
sox_io_backend.save(path, data, sample_rate, encoding=enc, bits_per_sample=bps)
data, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
self.assertEqual(original, data)
@parameterized.expand(
list(
itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)
),
name_func=name_func,
)
def test_flac(self, sample_rate, num_channels, compression_level):
"""save/load round trip should not degrade data for flac formats"""
original = get_wav_data("float32", num_channels)
data = original
for i in range(10):
path = self.get_temp_path(f"{i}.flac")
sox_io_backend.save(path, data, sample_rate, compression=compression_level)
data, sr = sox_io_backend.load(path)
assert sr == sample_rate
self.assertEqual(original, data)
|
"""
This application demonstrates how to find duplicate questions (paraphrases) in a long
list of sentences.
"""
from sentence_transformers import SentenceTransformer, util
# Questions can be a long list of sentences, up to 100k sentences or more.
# For demonstration purposes, we limit it to a few questions which all have one duplicate.
questions = [
"How did you catch your spouse cheating?",
"How can I find out if my husband is cheating?",
"Is my wife cheating?",
"How do I know if my partner is cheating?",
"Why is Starbucks in India overrated?",
"Is Starbucks overrated in india?",
"How can I lose weight fast without exercise?",
"Can I lose weight without exercise?",
"Which city is the best in India? Why?",
"Which is the best city in India?",
"How can I stay focused in class?",
"How can I stay focused on my school work?",
"How can I Remotely hack a mobile phone?",
"How can I hack my phone?",
"Where should I stay in Goa?",
"Which are the best hotels in Goa?",
"Why does hair turn white?",
"What causes older peoples hair to turn grey?",
"What is the easiest way to get followers on Quora?",
"How do I get more followers for my Quora?",
]
model = SentenceTransformer("all-MiniLM-L6-v2")
# Given a model and a list of strings (texts), util.paraphrase_mining performs a
# mining task by computing the cosine similarity between all possible combinations and returning the pairs with the highest scores.
# It returns a list of tuples (score, i, j) with i, j being indices into the questions list.
pairs = util.paraphrase_mining(model, questions)
# Output Top-20 pairs:
for score, qid1, qid2 in pairs[0:20]:
print(f"{score:.3f}\t{questions[qid1]}\t\t\t{questions[qid2]}")
|
"""
This application demonstrates how to find duplicate questions (paraphrases) in a long
list of sentences.
"""
from sentence_transformers import SentenceTransformer, util
# Questions can be a long list of sentences, up to 100k sentences or more.
# For demonstration purposes, we limit it to a few questions which all have one duplicate.
questions = [
"How did you catch your spouse cheating?",
"How can I find out if my husband is cheating?",
"Is my wife cheating?",
"How do I know if my partner is cheating?",
"Why is Starbucks in India overrated?",
"Is Starbucks overrated in india?",
"How can I lose weight fast without exercise?",
"Can I lose weight without exercise?",
"Which city is the best in India? Why?",
"Which is the best city in India?",
"How can I stay focused in class?",
"How can I stay focused on my school work?",
"How can I Remotely hack a mobile phone?",
"How can I hack my phone?",
"Where should I stay in Goa?",
"Which are the best hotels in Goa?",
"Why does hair turn white?",
"What causes older peoples hair to turn grey?",
"What is the easiest way to get followers on Quora?",
"How do I get more followers for my Quora?",
]
model = SentenceTransformer("all-MiniLM-L6-v2")
# Given a model and a list of strings (texts), util.paraphrase_mining performs a
# mining task by computing the cosine similarity between all possible combinations and returning the pairs with the highest scores.
# It returns a list of tuples (score, i, j) with i, j being indices into the questions list.
pairs = util.paraphrase_mining(model, questions)
# Output Top-20 pairs:
for score, qid1, qid2 in pairs[0:20]:
print("{:.3f}\t{}\t\t\t{}".format(score, questions[qid1], questions[qid2]))
|
"""
Use scikit-learn regressor interface with CPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
def main(client: Client) -> dxgb.Booster:
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method="hist")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
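    # Illustrative (optional) step, not part of the original example: dask arrays
    # are lazy, so calling .compute() materializes the prediction as a local
    # numpy array on the client for inspection.
    local_prediction = prediction.compute()
    print("First five predictions:", local_prediction[:5])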
return bst # returning the trained model
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Use scikit-learn regressor interface with CPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client, LocalCluster
from xgboost import dask as dxgb
def main(client):
# generate some random data for demonstration
n = 100
m = 10000
partition_size = 100
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = dxgb.DaskXGBRegressor(verbosity=1, n_estimators=2)
regressor.set_params(tree_method="hist")
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print("Evaluation history:", history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
from unittest.mock import mock_open, patch
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from llama_index.llms.cortex.utils import (
generate_sf_jwt,
is_spcs_environment,
get_spcs_base_url,
get_default_spcs_token,
SPCS_TOKEN_PATH,
)
import os
def test_spcs_utils():
# Setup environment variables
os.environ["SNOWFLAKE_HOST"] = "snowflake.example-spcs-host.com"
os.environ["SNOWFLAKE_ACCOUNT"] = "abcdef_ghijkl"
# Test get_spcs_base_url
expected_url = "https://abcdef-ghijkl.example-spcs-host.com"
# Mock the path check to ensure we're not in SPCS environment
with patch("os.path.exists", return_value=False):
# Test that ValueError is raised when not in SPCS environment
try:
get_spcs_base_url()
            raise AssertionError("ValueError not raised when not in SPCS environment")
except ValueError:
pass
# Mock the path check to pretend we're in SPCS environment
with patch("os.path.exists", return_value=True):
# Test that base URL is correctly formed
base_url = get_spcs_base_url()
assert base_url == expected_url
# Test is_spcs_environment
with patch("os.path.exists", return_value=True):
assert is_spcs_environment()
with patch("os.path.exists", return_value=False):
assert not is_spcs_environment()
# Test get_default_spcs_token
fake_token = "fake-jwt-token-for-testing"
with patch("builtins.open", mock_open(read_data=fake_token)) as mock_file:
token = get_default_spcs_token()
assert token == fake_token
mock_file.assert_called_once_with(SPCS_TOKEN_PATH)
# Clean up environment variables
del os.environ["SNOWFLAKE_HOST"]
del os.environ["SNOWFLAKE_ACCOUNT"]
def test_generate_sf_jwt():
sf_account = "MY_SNOWFLAKE_ORG-MY_SNOWFLAKE_ACCOUNT"
sf_user = "MY_SNOWFLAKE_USER"
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# Serialize the private key to PEM format
private_key = private_key_obj.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
with patch("builtins.open", mock_open(read_data=private_key)) as mock_file:
mock_file.return_value.read.return_value = (
private_key # Ensure binary data is returned
)
token = generate_sf_jwt(sf_account, sf_user, "dummy_key_file.pem")
assert isinstance(token, str)
|
from unittest.mock import mock_open, patch
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from llama_index.llms.cortex.utils import generate_sf_jwt
def test_generate_sf_jwt():
sf_account = "MY_SNOWFLAKE_ORG-MY_SNOWFLAKE_ACCOUNT"
sf_user = "MY_SNOWFLAKE_USER"
private_key_obj = rsa.generate_private_key(public_exponent=65537, key_size=2048)
# Serialize the private key to PEM format
private_key = private_key_obj.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
with patch("builtins.open", mock_open(read_data=private_key)) as mock_file:
mock_file.return_value.read.return_value = (
private_key # Ensure binary data is returned
)
token = generate_sf_jwt(sf_account, sf_user, "dummy_key_file.pem")
assert isinstance(token, str)
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.api_export import keras_export
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
BackendVariable = Variable # noqa: F405
@keras_export("keras.Variable")
class Variable(BackendVariable):
pass
backend_name_scope = name_scope # noqa: F405
@keras_export("keras.name_scope")
class name_scope(backend_name_scope):
pass
@keras_export("keras.device")
def device(device_name):
return device_scope(device_name) # noqa: F405
|
from keras.src.backend.config import backend
if backend() == "torch":
# When using the torch backend,
# torch needs to be imported first, otherwise it will segfault
# upon import.
import torch
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.keras_tensor import is_keras_tensor
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.backend.config import epsilon
from keras.src.backend.config import floatx
from keras.src.backend.config import image_data_format
from keras.src.backend.config import set_epsilon
from keras.src.backend.config import set_floatx
from keras.src.backend.config import set_image_data_format
from keras.src.backend.config import standardize_data_format
# Import backend functions.
if backend() == "tensorflow":
from keras.src.backend.tensorflow import * # noqa: F403
elif backend() == "jax":
from keras.src.backend.jax import * # noqa: F403
elif backend() == "torch":
from keras.src.backend.torch import * # noqa: F403
distribution_lib = None
elif backend() == "numpy":
from keras.src.backend.numpy import * # noqa: F403
distribution_lib = None
else:
raise ValueError(f"Unable to import backend : {backend()}")
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = '$WORK_DIR/' + fname
checkpoint = model_info['checkpoint'].strip()
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=$CPUS_PRE_TASK {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--cfg-option env_cfg.dist_cfg.port={port} '
command_info += ' &'
commands.append(command_info)
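# For reference, one generated entry looks roughly like the command below; the
# config/checkpoint names are hypothetical placeholders, and the command is
# wrapped here for readability while the script emits it as a single line:
#
#   echo 'configs/retinanet/retinanet_r50_fpn_1x_coco.py' &
#   GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=$CPUS_PRE_TASK tools/slurm_test.sh $PARTITION
#       retinanet_r50_fpn_1x_coco configs/retinanet/retinanet_r50_fpn_1x_coco.py
#       $CHECKPOINT_DIR/retinanet_r50_fpn_1x_coco.pth
#       --work-dir $WORK_DIR/retinanet_r50_fpn_1x_coco
#       --cfg-option env_cfg.dist_cfg.port=29666 &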
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
work_dir = 'WORK_DIR=$3 '
commands.append(work_dir)
commands.append('\n')
cpus_pre_task = 'CPUS_PER_TASK=${4:-2} '
commands.append(cpus_pre_task)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
from mmcv import Config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert benchmark model list to script')
parser.add_argument('config', help='test config file path')
parser.add_argument('--port', type=int, default=29666, help='dist port')
parser.add_argument(
'--work-dir',
default='tools/batch_test',
help='the dir to save metric')
parser.add_argument(
'--run', action='store_true', help='run script directly')
parser.add_argument(
'--out', type=str, help='path to save model benchmark script')
args = parser.parse_args()
return args
def process_model_info(model_info, work_dir):
config = model_info['config'].strip()
fname, _ = osp.splitext(osp.basename(config))
job_name = fname
work_dir = osp.join(work_dir, fname)
checkpoint = model_info['checkpoint'].strip()
if not isinstance(model_info['eval'], list):
evals = [model_info['eval']]
else:
evals = model_info['eval']
eval = ' '.join(evals)
return dict(
config=config,
job_name=job_name,
work_dir=work_dir,
checkpoint=checkpoint,
eval=eval)
def create_test_bash_info(commands, model_test_dict, port, script_name,
partition):
config = model_test_dict['config']
job_name = model_test_dict['job_name']
checkpoint = model_test_dict['checkpoint']
work_dir = model_test_dict['work_dir']
eval = model_test_dict['eval']
echo_info = f' \necho \'{config}\' &'
commands.append(echo_info)
commands.append('\n')
command_info = f'GPUS=8 GPUS_PER_NODE=8 ' \
f'CPUS_PER_TASK=2 {script_name} '
command_info += f'{partition} '
command_info += f'{job_name} '
command_info += f'{config} '
command_info += f'$CHECKPOINT_DIR/{checkpoint} '
command_info += f'--work-dir {work_dir} '
command_info += f'--eval {eval} '
command_info += f'--cfg-option dist_params.port={port} '
command_info += ' &'
commands.append(command_info)
def main():
args = parse_args()
if args.out:
out_suffix = args.out.split('.')[-1]
assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but got .{out_suffix}'
assert args.out or args.run, \
        ('Please specify at least one operation (save/run the '
         'script) with the argument "--out" or "--run"')
commands = []
partition_name = 'PARTITION=$1 '
commands.append(partition_name)
commands.append('\n')
checkpoint_root = 'CHECKPOINT_DIR=$2 '
commands.append(checkpoint_root)
commands.append('\n')
script_name = osp.join('tools', 'slurm_test.sh')
port = args.port
work_dir = args.work_dir
cfg = Config.fromfile(args.config)
for model_key in cfg:
model_infos = cfg[model_key]
if not isinstance(model_infos, list):
model_infos = [model_infos]
for model_info in model_infos:
print('processing: ', model_info['config'])
model_test_dict = process_model_info(model_info, work_dir)
create_test_bash_info(commands, model_test_dict, port, script_name,
'$PARTITION')
port += 1
command_str = ''.join(commands)
if args.out:
with open(args.out, 'w') as f:
f.write(command_str)
if args.run:
os.system(command_str)
if __name__ == '__main__':
main()
|
import os
import sys
import cognee
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="mock strategy requires python3.10 or higher"
)
@pytest.mark.skipif(
os.getenv("OPENAI_API_KEY") is None,
reason="OPENAI_API_KEY not available to test Cognee integration",
)
@pytest.mark.asyncio()
async def test_graph_rag_cognee():
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key=os.environ["OPENAI_API_KEY"],
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Add data to cognee
await cogneeRAG.add(documents, "test")
# Process data into a knowledge graph
await cogneeRAG.process_data("test")
# Answer prompt based on knowledge graph
search_results = await cogneeRAG.search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on knowledge graph:\n")
for result in search_results:
print(f"{result}\n")
# Answer prompt based on RAG
search_results = await cogneeRAG.rag_search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on RAG:\n")
for result in search_results:
print(f"{result}\n")
# Search for related nodes
search_results = await cogneeRAG.get_related_nodes("person")
print("\n\nRelated nodes are:\n")
for result in search_results:
print(f"{result}\n")
assert len(search_results) > 0, "No search results found"
# Clean all data from previous runs
await cognee.prune.prune_data()
await cognee.prune.prune_system(metadata=True)
|
import os
import asyncio
import cognee
import pytest
from llama_index.core import Document
from llama_index.graph_rag.cognee import CogneeGraphRAG
@pytest.mark.skipif(
os.getenv("OPENAI_API_KEY") is None,
reason="OPENAI_API_KEY not available to test Cognee integration",
)
@pytest.mark.asyncio()
async def test_graph_rag_cognee():
documents = [
Document(
text="Jessica Miller, Experienced Sales Manager with a strong track record in driving sales growth and building high-performing teams."
),
Document(
text="David Thompson, Creative Graphic Designer with over 8 years of experience in visual design and branding."
),
]
# Instantiate cognee GraphRAG
cogneeRAG = CogneeGraphRAG(
llm_api_key=os.environ["OPENAI_API_KEY"],
llm_provider="openai",
llm_model="gpt-4o-mini",
graph_db_provider="networkx",
vector_db_provider="lancedb",
relational_db_provider="sqlite",
relational_db_name="cognee_db",
)
# Add data to cognee
await cogneeRAG.add(documents, "test")
# Process data into a knowledge graph
await cogneeRAG.process_data("test")
# Answer prompt based on knowledge graph
search_results = await cogneeRAG.search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on knowledge graph:\n")
for result in search_results:
print(f"{result}\n")
# Answer prompt based on RAG
search_results = await cogneeRAG.rag_search("Tell me who are the people mentioned?")
assert len(search_results) > 0, "No search results found"
print("\n\nAnswer based on RAG:\n")
for result in search_results:
print(f"{result}\n")
# Search for related nodes
search_results = await cogneeRAG.get_related_nodes("person")
print("\n\nRelated nodes are:\n")
for result in search_results:
print(f"{result}\n")
assert len(search_results) > 0, "No search results found"
# Clean all data from previous runs
await cognee.prune.prune_data()
await cognee.prune.prune_system(metadata=True)
if __name__ == "__main__":
asyncio.run(test_graph_rag_cognee())
|
from typing import Dict, Iterable, Sequence
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
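    # Rough mapping between DocumentArray indexing and the methods below
    # (illustrative only; the exact dispatch lives in the base mixin):
    #   da['some_id']        -> _get_doc_by_id('some_id')
    #   da['some_id'] = doc  -> _set_doc_by_id('some_id', doc)
    #   del da['some_id']    -> _del_doc_by_id('some_id')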
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
if self._config.tag_indices:
for index in self._config.tag_indices:
text = doc.tags.get(index)
if text is not None:
extra_columns[index] = text
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
if self._list_like:
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids, list_like=self._list_like)
else:
self._offset2ids = Offset2ID([], list_like=self._list_like)
def _save_offset2ids(self):
if self._list_like:
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.ft(index_name=self._config.index_name).dropindex(
delete_documents=True
)
self._client.delete(self._offset2id_key)
|
from typing import Dict, Iterable, Sequence
from docarray import Document
from docarray.array.storage.base.getsetdel import BaseGetSetDelMixin
from docarray.array.storage.base.helper import Offset2ID
class GetSetDelMixin(BaseGetSetDelMixin):
"""Provide concrete implementation for ``__getitem__``, ``__setitem__``,
and ``__delitem__`` for ``DocumentArrayRedis``"""
def _get_doc_by_id(self, _id: str) -> 'Document':
"""Concrete implementation of base class' ``_get_doc_by_id``
:param _id: the id of the document
:return: the retrieved document from redis
"""
try:
result = self._client.hgetall(self._doc_prefix + _id)
doc = Document.from_base64(result[b'blob'])
return doc
except Exception as ex:
raise KeyError(_id) from ex
def _get_docs_by_ids(self, ids: Sequence[str]) -> Iterable['Document']:
"""Concrete implementation of base class' ``_get_docs_by_ids``
:param ids: ids of the document
:return: Iterable[Document]
"""
accumulated_docs = []
accumulated_docs_id_not_found = []
if not ids:
return accumulated_docs
pipe = self._client.pipeline()
for id in ids:
pipe.hgetall(self._doc_prefix + id)
results = pipe.execute()
for i, result in enumerate(results):
if result:
accumulated_docs.append(Document.from_base64(result[b'blob']))
else:
accumulated_docs_id_not_found.append(ids[i])
if accumulated_docs_id_not_found:
raise KeyError(accumulated_docs_id_not_found, accumulated_docs)
return accumulated_docs
def _set_doc_by_id(self, _id: str, value: 'Document'):
"""Concrete implementation of base class' ``_set_doc_by_id``
:param _id: the id of doc to update
:param value: the document to update to
"""
self._del_doc_by_id(_id)
if _id != value.id:
self._del_doc_by_id(value.id)
payload = self._document_to_redis(value)
self._client.hset(self._doc_prefix + value.id, mapping=payload)
def _set_docs_by_ids(self, ids, docs: Iterable['Document'], mismatch_ids: Dict):
"""Overridden implementation of _set_docs_by_ids in order to add docs in batches and flush at the end
:param ids: the ids used for indexing
"""
for _id, doc in zip(ids, docs):
self._del_doc_by_id(_id)
if _id != doc.id:
self._del_doc_by_id(doc.id)
self._upload_batch(docs)
def _del_doc_by_id(self, _id: str):
"""Concrete implementation of base class' ``_del_doc_by_id``
:param _id: the id of the document to delete
"""
if self._doc_id_exists(_id):
self._client.delete(self._doc_prefix + _id)
def _document_to_redis(self, doc: 'Document') -> Dict:
extra_columns = {}
for col, _ in self._config.columns.items():
tag = doc.tags.get(col)
if tag is not None:
extra_columns[col] = int(tag) if isinstance(tag, bool) else tag
if self._config.tag_indices:
for index in self._config.tag_indices:
text = doc.tags.get(index)
if text is not None:
extra_columns[index] = text
payload = {
'id': doc.id,
'embedding': self._map_embedding(doc.embedding),
'blob': doc.to_base64(),
**extra_columns,
}
if doc.text:
payload['text'] = doc.text
return payload
def _load_offset2ids(self):
ids = self._get_offset2ids_meta()
self._offset2ids = Offset2ID(ids)
def _save_offset2ids(self):
self._update_offset2ids_meta()
def _clear_storage(self):
self._client.ft(index_name=self._config.index_name).dropindex(
delete_documents=True
)
self._client.delete(self._offset2id_key)
|
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
_base_ = './sparse-rcnn_r50_fpn_ms-480-800-3x_coco.py'
num_proposals = 300
model = dict(
rpn_head=dict(num_proposals=num_proposals),
test_cfg=dict(
_delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
# augmentation strategy originates from DETR.
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
scales=[(400, 1333), (500, 1333), (600, 1333)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333),
(576, 1333), (608, 1333), (640, 1333),
(672, 1333), (704, 1333), (736, 1333),
(768, 1333), (800, 1333)],
keep_ratio=True)
]]),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
|
from pathlib import Path
from typing import Callable
import numpy as np
import pytest
import torchaudio
from jina import Document, DocumentArray
from ..vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory.mktemp('data')
return VADSpeechSegmenter(
normalize=False, dump=True, metas={'workspace': str(workspace)}
)
@pytest.fixture
def build_da() -> Callable[[str], 'Document']:
def _build_da(_type):
assert _type in {'wav', 'mp3', 'blob'}
doc = Document(id=_type)
extension = _type if _type != 'blob' else 'wav'
path = str(Path(__file__).parent / f'data/audio/test.{extension}')
if _type == 'blob':
data, sample_rate = torchaudio.load(path)
data = np.mean(data.cpu().numpy(), axis=0)
doc.blob = data
doc.tags['sample_rate'] = sample_rate
else:
doc.uri = path
return DocumentArray(doc)
return _build_da
|
import pytest
import os
from typing import Callable
from pathlib import Path
from jina import Document, DocumentArray
import numpy as np
import torchaudio
from ..vad_speech_segmenter import VADSpeechSegmenter
@pytest.fixture(scope='module')
def segmenter(tmpdir_factory) -> 'VADSpeechSegmenter':
workspace = tmpdir_factory.mktemp('data')
return VADSpeechSegmenter(
normalize=False, dump=True, metas={'workspace': str(workspace)}
)
@pytest.fixture
def build_da() -> Callable[[str], 'Document']:
def _build_da(_type):
assert _type in {'wav', 'mp3', 'blob'}
doc = Document(id=_type)
extension = _type if _type != 'blob' else 'wav'
path = str(Path(__file__).parent / f'data/audio/test.{extension}')
if _type == 'blob':
data, sample_rate = torchaudio.load(path)
data = np.mean(data.cpu().numpy(), axis=0)
doc.blob = data
doc.tags['sample_rate'] = sample_rate
else:
doc.uri = path
return DocumentArray(doc)
return _build_da
|
from typing import TYPE_CHECKING, Any, Dict, Type
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.typing import Tensor
from ..abstract_document import AbstractDocument
from ..base_node import BaseNode
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
"""
        Accessing the nested python class defined in the schema. Could be useful for reconstruction of a Document in
        serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
if content_type == 'tensor':
fields[field] = Tensor.read_ndarray(value.tensor)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_nested_item_protobuf()
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{field}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should be called when the Document
is nest into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
from typing import TYPE_CHECKING, Any, Dict, Type
from docarray.proto import DocumentProto, NdArrayProto, NodeProto
from docarray.proto.io import flush_ndarray, read_ndarray
from docarray.typing import Tensor
from ..abstract_document import AbstractDocument
from ..base_node import BaseNode
class ProtoMixin(AbstractDocument, BaseNode):
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['ProtoMixin']:
"""
        Accessing the nested python class defined in the schema. Could be useful for reconstruction of a Document in
        serialization/deserialization.
:param field: name of the field
:return:
"""
return cls.__fields__[field].type_
@classmethod
def from_protobuf(cls, pb_msg: 'DocumentProto') -> 'ProtoMixin':
"""create a Document from a protobuf message"""
from docarray import DocumentArray
fields: Dict[str, Any] = {}
for field in pb_msg.data:
value = pb_msg.data[field]
content_type = value.WhichOneof('content')
if content_type == 'tensor':
fields[field] = read_ndarray(value.tensor)
elif content_type == 'text':
fields[field] = value.text
elif content_type == 'nested':
fields[field] = cls._get_nested_document_class(field).from_protobuf(
value.nested
) # we get to the parent class
elif content_type == 'chunks':
fields[field] = DocumentArray.from_protobuf(
value.chunks
) # we get to the parent class
elif content_type is None:
fields[field] = None
else:
raise ValueError(
f'type {content_type} is not supported for deserialization'
)
return cls(**fields)
def to_protobuf(self) -> 'DocumentProto':
"""Convert Document into a Protobuf message.
:return: the protobuf message
"""
data = {}
for field, value in self:
try:
if isinstance(value, BaseNode):
nested_item = value._to_nested_item_protobuf()
elif isinstance(value, Tensor):
nd_proto = NdArrayProto()
flush_ndarray(nd_proto, value=value)
NodeProto(tensor=nd_proto)
nested_item = NodeProto(tensor=nd_proto)
elif type(value) is str:
nested_item = NodeProto(text=value)
elif type(value) is bytes:
nested_item = NodeProto(blob=value)
elif value is None:
nested_item = NodeProto()
else:
raise ValueError(f'field {field} with {value} is not supported')
data[field] = nested_item
except RecursionError as ex:
if len(ex.args) >= 1:
ex.args = (
f'Field `{field}` contains cyclic reference in memory. '
f'Could it be your Document is referring to itself?',
)
raise
except Exception as ex:
if len(ex.args) >= 1:
ex.args = (f'Field `{field}` is problematic',) + ex.args
raise
return DocumentProto(data=data)
def _to_nested_item_protobuf(self) -> 'NodeProto':
"""Convert Document into a nested item protobuf message. This function should be called when the Document
is nest into another Document that need to be converted into a protobuf
:return: the nested item protobuf message
"""
return NodeProto(nested=self.to_protobuf())
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
"""Run smoke tests"""
import sys
from pathlib import Path
import torch
import torchvision
from torchvision.io import decode_jpeg, read_file, read_image
from torchvision.models import resnet50, ResNet50_Weights
SCRIPT_DIR = Path(__file__).parent
def smoke_test_torchvision() -> None:
print(
"Is torchvision usable?",
all(x is not None for x in [torch.ops.image.decode_png, torch.ops.torchvision.roi_align]),
)
def smoke_test_torchvision_read_decode() -> None:
img_jpg = read_image(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
img_png = read_image(str(SCRIPT_DIR / "assets" / "interlaced_png" / "wizard_low.png"))
if img_png.shape != (4, 471, 354):
raise RuntimeError(f"Unexpected shape of img_png: {img_png.shape}")
def smoke_test_torchvision_decode_jpeg(device: str = "cpu"):
img_jpg_data = read_file(str(SCRIPT_DIR / "assets" / "encode_jpeg" / "grace_hopper_517x606.jpg"))
img_jpg = decode_jpeg(img_jpg_data, device=device)
if img_jpg.shape != (3, 606, 517):
raise RuntimeError(f"Unexpected shape of img_jpg: {img_jpg.shape}")
def smoke_test_compile() -> None:
try:
model = resnet50().cuda()
model = torch.compile(model)
x = torch.randn(1, 3, 224, 224, device="cuda")
out = model(x)
print(f"torch.compile model output: {out.shape}")
except RuntimeError:
if sys.platform == "win32":
print("Successfully caught torch.compile RuntimeError on win")
elif sys.version_info >= (3, 11, 0):
print("Successfully caught torch.compile RuntimeError on Python 3.11")
else:
raise
def smoke_test_torchvision_resnet50_classify(device: str = "cpu") -> None:
img = read_image(str(SCRIPT_DIR / ".." / "gallery" / "assets" / "dog2.jpg")).to(device)
# Step 1: Initialize model with the best available weights
weights = ResNet50_Weights.DEFAULT
model = resnet50(weights=weights).to(device)
model.eval()
# Step 2: Initialize the inference transforms
preprocess = weights.transforms(antialias=(device != "mps")) # antialias not supported on MPS
# Step 3: Apply inference preprocessing transforms
batch = preprocess(img).unsqueeze(0)
# Step 4: Use the model and print the predicted category
prediction = model(batch).squeeze(0).softmax(0)
class_id = prediction.argmax().item()
score = prediction[class_id].item()
category_name = weights.meta["categories"][class_id]
expected_category = "German shepherd"
print(f"{category_name} ({device}): {100 * score:.1f}%")
if category_name != expected_category:
raise RuntimeError(f"Failed ResNet50 classify {category_name} Expected: {expected_category}")
def main() -> None:
print(f"torchvision: {torchvision.__version__}")
print(f"torch.cuda.is_available: {torch.cuda.is_available()}")
# Turn 1.11.0aHASH into 1.11 (major.minor only)
version = ".".join(torchvision.__version__.split(".")[:2])
if version >= "0.16":
print(f"{torch.ops.image._jpeg_version() = }")
assert torch.ops.image._is_compiled_against_turbo()
smoke_test_torchvision()
smoke_test_torchvision_read_decode()
smoke_test_torchvision_resnet50_classify()
smoke_test_torchvision_decode_jpeg()
if torch.cuda.is_available():
smoke_test_torchvision_decode_jpeg("cuda")
smoke_test_torchvision_resnet50_classify("cuda")
smoke_test_compile()
if torch.backends.mps.is_available():
smoke_test_torchvision_resnet50_classify("mps")
if __name__ == "__main__":
main()
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
    The Mesh3D Document can contain a Mesh3DUrl (`Mesh3D.url`), a VerticesAndFaces
    object containing an AnyTensor of vertices (`Mesh3D.tensors.vertices`) and an
    AnyTensor of faces (`Mesh3D.tensors.faces`), and an AnyEmbedding
(`Mesh3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
You can extend this Document:
.. code-block:: python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[Text]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
mesh.name = 'my first mesh'
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Mesh3D, Text
# compose it
class MultiModalDoc(BaseDocument):
mesh: Mesh3D
text: Text
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes_ = mmdoc.mesh.url.load_bytes()
You can display your 3D mesh in a notebook from either its url, or its tensors:
.. code-block:: python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
"""
url: Optional[Mesh3DUrl]
tensors: Optional[VerticesAndFaces]
embedding: Optional[AnyEmbedding]
bytes_: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
from typing import Any, Optional, Type, TypeVar, Union
from docarray.base_document import BaseDocument
from docarray.documents.mesh.vertices_and_faces import VerticesAndFaces
from docarray.typing.tensor.embedding import AnyEmbedding
from docarray.typing.url.url_3d.mesh_url import Mesh3DUrl
T = TypeVar('T', bound='Mesh3D')
class Mesh3D(BaseDocument):
"""
Document for handling meshes for 3D data representation.
A mesh is a representation for 3D data and contains vertices and faces information.
Vertices are points in a 3D space, represented as a tensor of shape (n_points, 3).
Faces are triangular surfaces that can be defined by three points in 3D space,
corresponding to the three vertices of a triangle. Faces can be represented as a
tensor of shape (n_faces, 3). Each number in that tensor refers to an index of a
vertex in the tensor of vertices.
    The Mesh3D Document can contain a Mesh3DUrl (`Mesh3D.url`), a VerticesAndFaces
    object containing an AnyTensor of vertices (`Mesh3D.tensors.vertices`) and an
    AnyTensor of faces (`Mesh3D.tensors.faces`), and an AnyEmbedding
(`Mesh3D.embedding`).
EXAMPLE USAGE:
You can use this Document directly:
.. code-block:: python
from docarray.documents import Mesh3D
# use it directly
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
You can extend this Document:
.. code-block:: python
from docarray.documents import Mesh3D
from docarray.typing import AnyEmbedding
from typing import Optional
# extend it
class MyMesh3D(Mesh3D):
name: Optional[Text]
mesh = MyMesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.vertices)
mesh.name = 'my first mesh'
You can use this Document for composition:
.. code-block:: python
from docarray import BaseDocument
from docarray.documents import Mesh3D, Text
# compose it
class MultiModalDoc(BaseDocument):
mesh: Mesh3D
text: Text
mmdoc = MultiModalDoc(
mesh=Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'),
text=Text(text='hello world, how are you doing?'),
)
mmdoc.mesh.tensors = mmdoc.mesh.url.load()
# or
mmdoc.mesh.bytes = mmdoc.mesh.url.load_bytes()
You can display your 3D mesh in a notebook from either its url, or its tensors:
.. code-block:: python
from docarray.documents import Mesh3D
# display from url
mesh = Mesh3D(url='https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj')
mesh.url.display()
# display from tensors
mesh.tensors = mesh.url.load()
model = MyEmbeddingModel()
mesh.embedding = model(mesh.tensors.vertices)
"""
url: Optional[Mesh3DUrl]
tensors: Optional[VerticesAndFaces]
embedding: Optional[AnyEmbedding]
bytes: Optional[bytes]
@classmethod
def validate(
cls: Type[T],
value: Union[str, Any],
) -> T:
if isinstance(value, str):
value = cls(url=value)
return super().validate(value)
|
"""
===================================
Examples of Using `FrozenEstimator`
===================================
This example showcases some use cases of :class:`~sklearn.frozen.FrozenEstimator`.
:class:`~sklearn.frozen.FrozenEstimator` is a utility class that allows you to freeze a
fitted estimator. This is useful, for instance, when we want to pass a fitted estimator
to a meta-estimator, such as :class:`~sklearn.model_selection.FixedThresholdClassifier`
without letting the meta-estimator refit the estimator.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Setting a decision threshold for a pre-fitted classifier
# --------------------------------------------------------
# Fitted classifiers in scikit-learn use an arbitrary decision threshold to decide
# which class the given sample belongs to. The decision threshold is either `0.0` on the
# value returned by :term:`decision_function`, or `0.5` on the probability returned by
# :term:`predict_proba`.
#
# However, one might want to set a custom decision threshold. We can do this by
# using :class:`~sklearn.model_selection.FixedThresholdClassifier` and wrapping the
# classifier with :class:`~sklearn.frozen.FrozenEstimator`.
from sklearn.datasets import make_classification
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import FixedThresholdClassifier, train_test_split
X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
classifier = LogisticRegression().fit(X_train, y_train)
print(
"Probability estimates for three data points:\n"
f"{classifier.predict_proba(X_test[-3:]).round(3)}"
)
print(
"Predicted class for the same three data points:\n"
f"{classifier.predict(X_test[-3:])}"
)
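# %%
# As an illustrative aside (not part of the original example), these default
# predictions are essentially equivalent to thresholding the positive-class
# probability at `0.5`:
print(
    "Manual 0.5 threshold for the same three data points:\n"
    f"{(classifier.predict_proba(X_test[-3:])[:, 1] >= 0.5).astype(int)}"
)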
# %%
# Now imagine you'd want to set a different decision threshold on the probability
# estimates. We can do this by wrapping the classifier with
# :class:`~sklearn.frozen.FrozenEstimator` and passing it to
# :class:`~sklearn.model_selection.FixedThresholdClassifier`.
threshold_classifier = FixedThresholdClassifier(
estimator=FrozenEstimator(classifier), threshold=0.9
)
# %%
# Note that in the above piece of code, calling `fit` on
# :class:`~sklearn.model_selection.FixedThresholdClassifier` does not refit the
# underlying classifier.
#
# Now, let's see how the predictions changed with respect to the probability
# threshold.
print(
"Probability estimates for three data points with FixedThresholdClassifier:\n"
f"{threshold_classifier.predict_proba(X_test[-3:]).round(3)}"
)
print(
"Predicted class for the same three data points with FixedThresholdClassifier:\n"
f"{threshold_classifier.predict(X_test[-3:])}"
)
# %%
# We see that the probability estimates stay the same, but since a different decision
# threshold is used, the predicted classes are different.
#
# Please refer to
# :ref:`sphx_glr_auto_examples_model_selection_plot_cost_sensitive_learning.py`
# to learn about cost-sensitive learning and decision threshold tuning.
# %%
# Calibration of a pre-fitted classifier
# --------------------------------------
# You can use :class:`~sklearn.frozen.FrozenEstimator` to calibrate a pre-fitted
# classifier using :class:`~sklearn.calibration.CalibratedClassifierCV`.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import brier_score_loss
calibrated_classifier = CalibratedClassifierCV(
estimator=FrozenEstimator(classifier)
).fit(X_train, y_train)
prob_pos_clf = classifier.predict_proba(X_test)[:, 1]
clf_score = brier_score_loss(y_test, prob_pos_clf)
print(f"No calibration: {clf_score:.3f}")
prob_pos_calibrated = calibrated_classifier.predict_proba(X_test)[:, 1]
calibrated_score = brier_score_loss(y_test, prob_pos_calibrated)
print(f"With calibration: {calibrated_score:.3f}")
|
"""
===================================
Examples of Using `FrozenEstimator`
===================================
This example showcases some use cases of :class:`~sklearn.frozen.FrozenEstimator`.
:class:`~sklearn.frozen.FrozenEstimator` is a utility class that allows to freeze a
fitted estimator. This is useful, for instance, when we want to pass a fitted estimator
to a meta-estimator, such as :class:`~sklearn.model_selection.FixedThresholdClassifier`
without letting the meta-estimator refit the estimator.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Setting a decision threshold for a pre-fitted classifier
# --------------------------------------------------------
# Fitted classifiers in scikit-learn use an arbitrary decision threshold to decide
# which class the given sample belongs to. The decision threshold is either `0.0` on the
# value returned by :term:`decision_function`, or `0.5` on the probability returned by
# :term:`predict_proba`.
#
# However, one might want to set a custom decision threshold. We can do this by
# using :class:`~sklearn.model_selection.FixedThresholdClassifier` and wrapping the
# classifier with :class:`~sklearn.frozen.FrozenEstimator`.
from sklearn.datasets import make_classification
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import FixedThresholdClassifier, train_test_split
X, y = make_classification(n_samples=1000, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
classifier = LogisticRegression().fit(X_train, y_train)
print(
"Probability estimates for three data points:\n"
f"{classifier.predict_proba(X_test[-3:]).round(3)}"
)
print(
"Predicted class for the same three data points:\n"
f"{classifier.predict(X_test[-3:])}"
)
# %%
# Now imagine you'd want to set a different decision threshold on the probability
# estimates. We can do this by wrapping the classifier with
# :class:`~sklearn.frozen.FrozenEstimator` and passing it to
# :class:`~sklearn.model_selection.FixedThresholdClassifier`.
threshold_classifier = FixedThresholdClassifier(
estimator=FrozenEstimator(classifier), threshold=0.9
)
# %%
# Note that in the above piece of code, calling `fit` on
# :class:`~sklearn.model_selection.FixedThresholdClassifier` does not refit the
# underlying classifier.
#
# Now, let's see how the predictions changed with respect to the probability
# threshold.
print(
"Probability estimates for three data points with FixedThresholdClassifier:\n"
f"{threshold_classifier.predict_proba(X_test[-3:]).round(3)}"
)
print(
"Predicted class for the same three data points with FixedThresholdClassifier:\n"
f"{threshold_classifier.predict(X_test[-3:])}"
)
# %%
# We see that the probability estimates stay the same, but since a different decision
# threshold is used, the predicted classes are different.
#
# Please refer to
# :ref:`sphx_glr_auto_examples_model_selection_plot_cost_sensitive_learning.py`
# to learn about cost-sensitive learning and decision threshold tuning.
# %%
# Calibration of a pre-fitted classifier
# --------------------------------------
# You can use :class:`~sklearn.frozen.FrozenEstimator` to calibrate a pre-fitted
# classifier using :class:`~sklearn.calibration.CalibratedClassifierCV`.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import brier_score_loss
calibrated_classifier = CalibratedClassifierCV(
estimator=FrozenEstimator(classifier)
).fit(X_train, y_train)
prob_pos_clf = classifier.predict_proba(X_test)[:, 1]
clf_score = brier_score_loss(y_test, prob_pos_clf)
print(f"No calibration: {clf_score:.3f}")
prob_pos_calibrated = calibrated_classifier.predict_proba(X_test)[:, 1]
calibrated_score = brier_score_loss(y_test, prob_pos_calibrated)
print(f"With calibration: {calibrated_score:.3f}")
|
"""
This module provides dynamic access to deprecated JSON tools in LangChain.
It ensures backward compatibility by forwarding references such as
`JsonGetValueTool`, `JsonListKeysTool`, and `JsonSpec` to their updated
locations within the `langchain_community.tools` namespace.
This setup allows legacy code to continue working while guiding developers
toward using the updated module paths.
"""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JsonGetValueTool, JsonListKeysTool
from langchain_community.tools.json.tool import JsonSpec
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JsonSpec": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools",
"JsonGetValueTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""
Dynamically retrieve attributes from the updated module path.
This method is used to resolve deprecated attribute imports
at runtime and forward them to their new locations.
Args:
name (str): The name of the attribute to import.
Returns:
Any: The resolved attribute from the appropriate updated module.
"""
return _import_attribute(name)
__all__ = [
"JsonGetValueTool",
"JsonListKeysTool",
"JsonSpec",
]
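# ---------------------------------------------------------------------------
# Generic sketch (not part of the public module above) of the PEP 562
# forwarding pattern it relies on: a module-level __getattr__ resolves
# deprecated names from a mapping and imports them from their new location on
# first access. The mapping and module path below are hypothetical.
# ---------------------------------------------------------------------------
import importlib
import warnings
_EXAMPLE_LOOKUP = {"OldName": "new_package.new_module"}  # hypothetical mapping
def _forward_deprecated_example(name: str) -> Any:
    """Resolve a deprecated attribute from its new home, warning the caller."""
    if name not in _EXAMPLE_LOOKUP:
        raise AttributeError(name)
    warnings.warn(f"{name} moved to {_EXAMPLE_LOOKUP[name]}", DeprecationWarning)
    return getattr(importlib.import_module(_EXAMPLE_LOOKUP[name]), name)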
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import JsonGetValueTool, JsonListKeysTool
from langchain_community.tools.json.tool import JsonSpec
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JsonSpec": "langchain_community.tools.json.tool",
"JsonListKeysTool": "langchain_community.tools",
"JsonGetValueTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"JsonGetValueTool",
"JsonListKeysTool",
"JsonSpec",
]
|
# ruff: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "2.16.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# flake8: noqa
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.16.2.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmengine import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if __name__ == '__main__':
main()
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
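# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the class above). The
# toy tokenize_fn returns fixed-size tensors just to show how each text column
# is expanded into {column}_input_ids / {column}_attention_mask entries.
# ---------------------------------------------------------------------------
def _toy_tokenize_fn(texts: List[str]) -> Dict[str, torch.Tensor]:
    n = len(texts)
    return {
        "input_ids": torch.zeros(n, 4, dtype=torch.long),
        "attention_mask": torch.ones(n, 4, dtype=torch.long),
    }
if __name__ == "__main__":
    collator = SentenceTransformerDataCollator(tokenize_fn=_toy_tokenize_fn)
    batch = collator(
        [
            {"anchor": "a", "positive": "b", "label": 1},
            {"anchor": "c", "positive": "d", "label": 0},
        ]
    )
    # Expected keys: label, anchor_input_ids, anchor_attention_mask,
    # positive_input_ids, positive_attention_mask
    print(sorted(batch.keys()))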
|
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List
import torch
@dataclass
class SentenceTransformerDataCollator:
"""Collator for a SentenceTransformers model.
This encodes the text columns to {column}_input_ids and {column}_attention_mask columns.
    This works with the two-text dataset that is used as the example in the training overview:
https://www.sbert.net/docs/training/overview.html
"""
tokenize_fn: Callable
valid_label_columns: List[str] = field(default_factory=lambda: ["label", "score"])
def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
columns = list(features[0].keys())
# We should always be able to return a loss, label or not:
batch = {"return_loss": True}
if "dataset_name" in columns:
columns.remove("dataset_name")
batch["dataset_name"] = features[0]["dataset_name"]
# Extract the label column if it exists
for label_column in self.valid_label_columns:
if label_column in columns:
batch["label"] = torch.tensor([row[label_column] for row in features])
columns.remove(label_column)
break
# Extract the feature columns
for column in columns:
tokenized = self.tokenize_fn([row[column] for row in features])
for key, value in tokenized.items():
batch[f"{column}_{key}"] = value
return batch
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
# drop_prob must be (0,1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
|
import pytest
import torch
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
# drop_prob must be (0,1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
* A non-seekable file like object can be used when loading.
    * No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
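# ---------------------------------------------------------------------------
# Round-trip sketch (illustration only, not part of the module above): save a
# small state dict into an in-memory buffer and load it back. io.BytesIO
# stands in for the non-seekable, socket-like stream the docstrings describe.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import io
    state = {"weight": torch.arange(4, dtype=torch.float32)}
    buffer = io.BytesIO()
    _streaming_save(state, buffer)
    buffer.seek(0)
    restored = _streaming_load(buffer)
    print(torch.equal(state["weight"], restored["weight"]))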
|
import pickle
from dataclasses import dataclass
from io import BufferedIOBase
from typing import Any
import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch.serialization import _load, _save, DEFAULT_PROTOCOL, MAP_LOCATION
__all__: list[str] = []
@dataclass
class _Entry:
key: str
is_storage: bool
length: int
_weights_only_unpickler._add_safe_globals([_Entry])
class _PseudoZipFile:
def __init__(self) -> None:
self.records: dict[str, tuple[object, int]] = {}
def write_record(self, key: str, data: object, length: int) -> None:
self.records[key] = (data, length)
def write_to(self, f: BufferedIOBase) -> None:
entries = []
for key, (data, length) in self.records.items():
entries.append(
_Entry(
key=key,
is_storage=isinstance(data, torch.UntypedStorage),
length=length,
)
)
pickle.dump(entries, f, protocol=DEFAULT_PROTOCOL)
for key, (data, length) in self.records.items():
if isinstance(data, bytes):
f.write(data)
elif isinstance(data, str):
f.write(data.encode("utf-8"))
elif isinstance(data, torch.UntypedStorage):
data._write_file(f, False, False, 1)
else:
raise TypeError(f"unknown type: {type(data)}")
def read_from(self, f: BufferedIOBase) -> None:
entries = _weights_only_unpickler.load(f)
for entry in entries:
data = f.read(entry.length)
if entry.is_storage:
storage = torch.frombuffer(
data,
dtype=torch.uint8,
).untyped_storage()
self.records[entry.key] = (
storage,
entry.length,
)
else:
self.records[entry.key] = (data, entry.length)
def has_record(self, key: str) -> bool:
return key in self.records
def get_record(self, key: str) -> object:
return self.records[key][0]
def get_storage_from_record(
self, key: str, _length: int, _type: int
) -> torch.Tensor:
return torch.tensor(self.records[key][0], dtype=torch.uint8)
def serialization_id(self) -> str:
return "torchft"
def _streaming_save(
obj: object,
f: BufferedIOBase,
pickle_module: Any = pickle,
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Save the object to a file-like object in a streaming fashion compatible with
network sockets.
This behaves similarly to :func:`torch.save` with a few notable differences:
* A non-seekable file like object can be used when loading.
* No forwards/backwards compatibility is provided for the serialization
format. This is only intended to be used with a single version of PyTorch
with transient storage (i.e. sockets or temp files).
* mmap is not supported
See :func:`torch.save` for more details on specific arguments.
"""
zip_file = _PseudoZipFile()
_save(
obj,
zip_file=zip_file,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
_disable_byteorder_record=False,
)
zip_file.write_to(f)
def _streaming_load(
f: BufferedIOBase,
map_location: MAP_LOCATION = None,
pickle_module: Any = None,
*,
weights_only: bool = True,
**pickle_load_args: Any,
) -> object:
"""
Load the object from a file-like object in a streaming fashion compatible with
network sockets.
See :func:`_streaming_save` for more details about the streaming behavior.
See :func:`torch.load` for more details on specific arguments.
"""
if weights_only:
if pickle_module is not None:
raise RuntimeError(
"Can not safely load weights when explicit pickle_module is specified"
)
pickle_module = _weights_only_unpickler
else:
if pickle_module is None:
pickle_module = pickle
if "encoding" not in pickle_load_args.keys():
pickle_load_args["encoding"] = "utf-8"
zip_file = _PseudoZipFile()
zip_file.read_from(f)
return _load(
zip_file=zip_file,
map_location=map_location,
pickle_module=pickle_module,
**pickle_load_args,
)
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import squareplus
from keras.src.ops.nn import tanh_shrink
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.ops.nn import average_pool
from keras.src.ops.nn import batch_normalization
from keras.src.ops.nn import binary_crossentropy
from keras.src.ops.nn import categorical_crossentropy
from keras.src.ops.nn import celu
from keras.src.ops.nn import conv
from keras.src.ops.nn import conv_transpose
from keras.src.ops.nn import ctc_decode
from keras.src.ops.nn import ctc_loss
from keras.src.ops.nn import depthwise_conv
from keras.src.ops.nn import dot_product_attention
from keras.src.ops.nn import elu
from keras.src.ops.nn import gelu
from keras.src.ops.nn import glu
from keras.src.ops.nn import hard_shrink
from keras.src.ops.nn import hard_sigmoid
from keras.src.ops.nn import hard_silu
from keras.src.ops.nn import hard_silu as hard_swish
from keras.src.ops.nn import hard_tanh
from keras.src.ops.nn import leaky_relu
from keras.src.ops.nn import log_sigmoid
from keras.src.ops.nn import log_softmax
from keras.src.ops.nn import max_pool
from keras.src.ops.nn import moments
from keras.src.ops.nn import multi_hot
from keras.src.ops.nn import normalize
from keras.src.ops.nn import one_hot
from keras.src.ops.nn import psnr
from keras.src.ops.nn import relu
from keras.src.ops.nn import relu6
from keras.src.ops.nn import selu
from keras.src.ops.nn import separable_conv
from keras.src.ops.nn import sigmoid
from keras.src.ops.nn import silu
from keras.src.ops.nn import silu as swish
from keras.src.ops.nn import soft_shrink
from keras.src.ops.nn import softmax
from keras.src.ops.nn import softplus
from keras.src.ops.nn import softsign
from keras.src.ops.nn import sparse_categorical_crossentropy
from keras.src.ops.nn import tanh_shrink
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from docarray import Document
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document']) -> None:
for value in values:
self.append(value)
|
from abc import abstractmethod
from typing import Iterator, Iterable, MutableSequence
from .... import Document
class BaseSequenceLikeMixin(MutableSequence[Document]):
"""Implement sequence-like methods"""
def insert(self, index: int, value: 'Document'):
"""Insert `doc` at `index`.
:param index: Position of the insertion.
:param value: The doc needs to be inserted.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.insert(index, value.id)
def append(self, value: 'Document'):
"""Append `doc` to the end of the array.
:param value: The doc needs to be appended.
"""
self._set_doc_by_id(value.id, value)
self._offset2ids.append(value.id)
@abstractmethod
def __eq__(self, other):
...
def __len__(self):
return len(self._offset2ids)
def __iter__(self) -> Iterator['Document']:
for _id in self._offset2ids:
yield self._get_doc_by_id(_id)
@abstractmethod
def __contains__(self, other):
...
def clear(self):
"""Clear the data of :class:`DocumentArray`"""
self._del_all_docs()
def __bool__(self):
"""To simulate ```l = []; if l: ...```
:return: returns true if the length of the array is larger than 0
"""
return len(self) > 0
def extend(self, values: Iterable['Document']) -> None:
for value in values:
self.append(value)
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
streaming: bool = False,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.streaming = streaming
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, IterableDataset]:
pass
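# ---------------------------------------------------------------------------
# Sketch (illustration only, not part of this module): concrete readers in
# this package store their arguments in __init__ and implement read(). The
# class below is hypothetical and wraps an in-memory dict instead of files.
# ---------------------------------------------------------------------------
class InMemoryDictInputStream(AbstractDatasetInputStream):
    def __init__(self, data: dict, **kwargs):
        super().__init__(**kwargs)
        self.data = data
    def read(self) -> Dataset:
        # Ignores the caching/streaming options; simply materializes the dict.
        return Dataset.from_dict(self.data, features=self.features)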
|
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import DatasetDict, Features, NamedSplit
from ..arrow_dataset import Dataset
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
def __init__(
self,
path_or_paths: NestedDataStructureLike[PathLike],
split: Optional[NamedSplit] = None,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
self.path_or_paths = path_or_paths
self.split = split if split or isinstance(path_or_paths, dict) else "train"
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.kwargs = kwargs
@abstractmethod
def read(self) -> Union[Dataset, DatasetDict]:
pass
class AbstractDatasetInputStream(ABC):
def __init__(
self,
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
self.features = features
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.kwargs = kwargs
@abstractmethod
def read(self) -> Dataset:
pass
|
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
get_sinusoid,
get_wav_data,
save_wav,
skipIfNoSox,
TempDirMixin,
TorchaudioTestCase,
)
from .common import load_params
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various effects
The purpose of this test suite is to verify that sox_effect functionalities do not exhibit
abnormal behaviors.
This test suite should be able to run without any additional tools (such as sox command),
however without such tools, the correctness of each function cannot be verified.
"""
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
"""`apply_effects_tensor` should not crash"""
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
original = get_sinusoid(frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32")
_found, _sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
_found, _sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first
)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_fileobj(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
with open(input_path, "rb") as fileobj:
_found, _sr = sox_effects.apply_effects_file(
fileobj, effects, normalize=False, channels_first=channels_first
)
|
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoSox,
get_wav_data,
get_sinusoid,
save_wav,
)
from .common import (
load_params,
)
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various effects
The purpose of this test suite is to verify that sox_effect functionalities do not exhibit
abnormal behaviors.
This test suite should be able to run without any additional tools (such as sox command),
however without such tools, the correctness of each function cannot be verified.
"""
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
"""`apply_effects_tensor` should not crash"""
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
original = get_sinusoid(frequency=800, sample_rate=input_sr, n_channels=num_channels, dtype="float32")
_found, _sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
_found, _sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first
)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_fileobj(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = "int32"
channels_first = True
effects = args["effects"]
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path("input.wav")
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
with open(input_path, "rb") as fileobj:
_found, _sr = sox_effects.apply_effects_file(
fileobj, effects, normalize=False, channels_first=channels_first
)
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import hard_tanh
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
hard_tanh,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
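# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the module above): `get`
# accepts a name, a config dict, or a callable, and built-in activations
# round-trip through serialize()/deserialize().
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    fn = get("relu")
    print(fn is relu)                       # name lookup hits the built-in
    print(get(fn) is fn)                    # callables pass through unchanged
    print(serialize(fn))                    # built-ins serialize to their name
    print(deserialize(serialize(fn)) is fn)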
|
import types
from keras.src.activations.activations import celu
from keras.src.activations.activations import elu
from keras.src.activations.activations import exponential
from keras.src.activations.activations import gelu
from keras.src.activations.activations import glu
from keras.src.activations.activations import hard_sigmoid
from keras.src.activations.activations import hard_silu
from keras.src.activations.activations import leaky_relu
from keras.src.activations.activations import linear
from keras.src.activations.activations import log_softmax
from keras.src.activations.activations import mish
from keras.src.activations.activations import relu
from keras.src.activations.activations import relu6
from keras.src.activations.activations import selu
from keras.src.activations.activations import sigmoid
from keras.src.activations.activations import silu
from keras.src.activations.activations import softmax
from keras.src.activations.activations import softplus
from keras.src.activations.activations import softsign
from keras.src.activations.activations import tanh
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
ALL_OBJECTS = {
relu,
leaky_relu,
relu6,
softmax,
celu,
elu,
selu,
softplus,
softsign,
silu,
gelu,
glu,
tanh,
sigmoid,
exponential,
hard_sigmoid,
hard_silu,
linear,
mish,
log_softmax,
}
ALL_OBJECTS_DICT = {fn.__name__: fn for fn in ALL_OBJECTS}
# Additional aliases
ALL_OBJECTS_DICT["swish"] = silu
ALL_OBJECTS_DICT["hard_swish"] = hard_silu
@keras_export("keras.activations.serialize")
def serialize(activation):
fn_config = serialization_lib.serialize_keras_object(activation)
if "config" not in fn_config:
raise ValueError(
f"Unknown activation function '{activation}' cannot be "
"serialized due to invalid function name. Make sure to use "
"an activation name that matches the references defined in "
"activations.py or use "
"`@keras.saving.register_keras_serializable()`"
"to register any custom activations. "
f"config={fn_config}"
)
if not isinstance(activation, types.FunctionType):
# Case for additional custom activations represented by objects
return fn_config
if (
isinstance(fn_config["config"], str)
and fn_config["config"] not in globals()
):
# Case for custom activation functions from external activations modules
fn_config["config"] = object_registration.get_registered_name(
activation
)
return fn_config
# Case for keras.activations builtins (simply return name)
return fn_config["config"]
@keras_export("keras.activations.deserialize")
def deserialize(config, custom_objects=None):
"""Return a Keras activation function via its config."""
return serialization_lib.deserialize_keras_object(
config,
module_objects=ALL_OBJECTS_DICT,
custom_objects=custom_objects,
)
@keras_export("keras.activations.get")
def get(identifier):
"""Retrieve a Keras activation function via an identifier."""
if identifier is None:
return linear
if isinstance(identifier, dict):
obj = deserialize(identifier)
elif isinstance(identifier, str):
obj = ALL_OBJECTS_DICT.get(identifier, None)
else:
obj = identifier
if callable(obj):
return obj
raise ValueError(
f"Could not interpret activation function identifier: {identifier}"
)
|
"""Base interface class for storing chat history per user."""
import asyncio
from abc import abstractmethod
from typing import List, Optional
from llama_index.core.llms import ChatMessage
from llama_index.core.schema import BaseComponent
class BaseChatStore(BaseComponent):
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "BaseChatStore"
@abstractmethod
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Set messages for a key."""
...
@abstractmethod
def get_messages(self, key: str) -> List[ChatMessage]:
"""Get messages for a key."""
...
@abstractmethod
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
...
@abstractmethod
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Delete messages for a key."""
...
@abstractmethod
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
...
@abstractmethod
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
...
@abstractmethod
def get_keys(self) -> List[str]:
"""Get all keys."""
...
async def aset_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Async version of Get messages for a key."""
await asyncio.to_thread(self.set_messages, key, messages)
async def aget_messages(self, key: str) -> List[ChatMessage]:
"""Async version of Get messages for a key."""
return await asyncio.to_thread(self.get_messages, key)
async def async_add_message(self, key: str, message: ChatMessage) -> None:
"""Async version of Add a message for a key."""
await asyncio.to_thread(self.add_message, key, message)
async def adelete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Async version of Delete messages for a key."""
return await asyncio.to_thread(self.delete_messages, key)
async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Async version of Delete specific message for a key."""
return await asyncio.to_thread(self.delete_message, key, idx)
async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Async version of Delete last message for a key."""
return await asyncio.to_thread(self.delete_last_message, key)
async def aget_keys(self) -> List[str]:
"""Async version of Get all keys."""
return await asyncio.to_thread(self.get_keys)
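# ---------------------------------------------------------------------------
# Minimal sketch (illustration only, not part of the interface above) of an
# in-memory implementation; llama_index ships its own SimpleChatStore, this
# only shows the shape of a concrete subclass. Plain pydantic `Field` is
# assumed here, while llama_index itself goes through its pydantic bridge.
# ---------------------------------------------------------------------------
from typing import Dict
from pydantic import Field
class InMemoryChatStore(BaseChatStore):
    store: Dict[str, List[ChatMessage]] = Field(default_factory=dict)
    def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
        self.store[key] = list(messages)
    def get_messages(self, key: str) -> List[ChatMessage]:
        return self.store.get(key, [])
    def add_message(self, key: str, message: ChatMessage) -> None:
        self.store.setdefault(key, []).append(message)
    def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
        return self.store.pop(key, None)
    def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
        messages = self.store.get(key, [])
        if not messages or idx >= len(messages):
            return None
        return messages.pop(idx)
    def delete_last_message(self, key: str) -> Optional[ChatMessage]:
        return self.delete_message(key, -1)
    def get_keys(self) -> List[str]:
        return list(self.store.keys())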
|
"""Base interface class for storing chat history per user."""
import asyncio
from abc import abstractmethod
from typing import List, Optional
from llama_index.core.llms import ChatMessage
from llama_index.core.schema import BaseComponent
class BaseChatStore(BaseComponent):
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "BaseChatStore"
@abstractmethod
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Set messages for a key."""
...
@abstractmethod
def get_messages(self, key: str) -> List[ChatMessage]:
"""Get messages for a key."""
...
@abstractmethod
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
...
@abstractmethod
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Delete messages for a key."""
...
@abstractmethod
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
...
@abstractmethod
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
...
@abstractmethod
def get_keys(self) -> List[str]:
"""Get all keys."""
...
async def aset_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Async version of Get messages for a key."""
await asyncio.to_thread(self.set_messages, key, messages)
async def aget_messages(self, key: str) -> List[ChatMessage]:
"""Async version of Get messages for a key."""
return await asyncio.to_thread(self.get_messages, key)
async def async_add_message(self, key: str, message: ChatMessage) -> None:
"""Async version of Add a message for a key."""
await asyncio.to_thread(self.add_message, key, message)
async def adelete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Async version of Delete messages for a key."""
return await asyncio.to_thread(self.delete_messages, key)
async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Async version of Delete specific message for a key."""
return await asyncio.to_thread(self.delete_message, key, idx)
async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Async version of Delete last message for a key."""
return await asyncio.to_thread(self.delete_last_message, key)
async def aget_keys(self) -> List[str]:
"""Async version of Get all keys."""
return await asyncio.to_thread(self.get_keys)
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='Evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_chinese_clip import *
from .feature_extraction_chinese_clip import *
from .image_processing_chinese_clip import *
from .image_processing_chinese_clip_fast import *
from .modeling_chinese_clip import *
from .processing_chinese_clip import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure
if TYPE_CHECKING:
from .configuration_chinese_clip import *
from .feature_extraction_chinese_clip import *
from .image_processing_chinese_clip import *
from .modeling_chinese_clip import *
from .processing_chinese_clip import *
else:
import sys
_file = globals()["__file__"]
sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
|
import dataclasses
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.executors import BaseExecutor
from jina.serve.executors.metas import get_default_metas
class ExecutorLegacyParser(BaseLegacyParser):
"""Legacy parser for executor."""
def parse(
self,
cls: Type['BaseExecutor'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseExecutor':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
        :param data: the executor YAML config loaded as a python dict
        :param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
        :return: the executor instance constructed from the YAML config
"""
from jina.logging.predefined import default_logger
_meta_config = get_default_metas()
_meta_config.update(data.get('metas', {}))
if _meta_config:
data['metas'] = _meta_config
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
if dataclasses.is_dataclass(cls):
obj = cls(
**data.get('with', {}),
)
cls.__bases__[0].__init__(
obj,
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
else:
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
        # check if the yaml file used to instantiate 'cls' has arguments that are not in 'cls'
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(cls)
arguments_from_yaml = set(data.get('with', {}))
difference_set = arguments_from_yaml - arguments_from_cls
# only log warnings about unknown args for main Pod
if any(difference_set) and not ExecutorLegacyParser.is_tail_or_head(data):
default_logger.warning(
f'The given arguments {difference_set} are not defined in `{cls.__name__}.__init__`'
)
if not _meta_config:
default_logger.warning(
'"metas" config is not found in this yaml file, '
                'this map is important as it provides a unique identifier when '
'persisting the executor on disk.'
)
# for compound executor
if 'components' in data:
obj.components = lambda: data['components']
obj.is_updated = False
return obj
@staticmethod
def is_tail_or_head(data: Dict) -> bool:
"""Based on name, compute if this is a tail/head Pod or a main Pod
:param data: the data for the parser
:return: True if it is tail/head, False otherwise
"""
try:
name = data.get('runtime_args', {}).get('name', '')
return 'head' in name or 'tail' in name
        except Exception:
            # name can be None in tests since it's not passed
            return False
def dump(self, data: 'BaseExecutor') -> Dict:
"""
:param data: versioned executor object
        :return: the dictionary representation of the versioned executor object
"""
# note: we only save non-default property for the sake of clarity
_defaults = get_default_metas()
p = (
{
k: getattr(data.metas, k)
for k, v in _defaults.items()
if getattr(data.metas, k) != v
}
if hasattr(data, 'metas')
else {}
)
a = {k: v for k, v in data._init_kwargs_dict.items() if k not in _defaults}
r = {}
if a:
r['with'] = a
if p:
r['metas'] = p
if hasattr(data, 'requests'):
r['requests'] = {k: v.fn.__name__ for k, v in data.requests.items()}
if hasattr(data, 'dynamic_batching'):
r['dynamic_batching'] = data.dynamic_batching
if hasattr(data, 'components'):
r['components'] = data.components
return r
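# Usage sketch (assumed shape of the YAML-derived dict; 'MyExecutor' is hypothetical):
#
#   data = {
#       'with': {'greeting': 'hello'},
#       'metas': {'name': 'my-executor'},
#       'requests': {'/index': 'index_fn'},
#   }
#   executor = ExecutorLegacyParser().parse(MyExecutor, data)
#
# Keys under 'with' are forwarded to MyExecutor.__init__; keys not declared there
# are reported with a warning, and only for the main Pod (not head/tail Pods).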
|
import dataclasses
from typing import Any, Dict, Optional, Type
from jina.jaml.parsers.base import BaseLegacyParser
from jina.serve.executors import BaseExecutor
from jina.serve.executors.metas import get_default_metas
class ExecutorLegacyParser(BaseLegacyParser):
"""Legacy parser for executor."""
def parse(
self,
cls: Type['BaseExecutor'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseExecutor':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
        :param data: the executor YAML config loaded as a python dict
        :param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
        :return: the executor instance constructed from the YAML config
"""
from jina.logging.predefined import default_logger
_meta_config = get_default_metas()
_meta_config.update(data.get('metas', {}))
if _meta_config:
data['metas'] = _meta_config
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
if dataclasses.is_dataclass(cls):
obj = cls(
**data.get('with', {}),
)
cls.__bases__[0].__init__(
obj,
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
else:
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
        # check if the yaml file used to instantiate 'cls' has arguments that are not in 'cls'
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(cls)
arguments_from_yaml = set(data.get('with', {}))
difference_set = arguments_from_yaml - arguments_from_cls
# only log warnings about unknown args for main Pod
if any(difference_set) and not ExecutorLegacyParser.is_tail_or_head(data):
default_logger.warning(
f'The given arguments {difference_set} are not defined in `{cls.__name__}.__init__`'
)
if not _meta_config:
default_logger.warning(
'"metas" config is not found in this yaml file, '
                'this map is important as it provides a unique identifier when '
'persisting the executor on disk.'
)
# for compound executor
if 'components' in data:
obj.components = lambda: data['components']
obj.is_updated = False
return obj
@staticmethod
def is_tail_or_head(data: Dict) -> bool:
"""Based on name, compute if this is a tail/head Pod or a main Pod
:param data: the data for the parser
:return: True if it is tail/head, False otherwise
"""
try:
name = data.get('runtime_args', {}).get('name', '')
return 'head' in name or 'tail' in name
        except Exception:
            # name can be None in tests since it's not passed
            return False
def dump(self, data: 'BaseExecutor') -> Dict:
"""
:param data: versioned executor object
        :return: the dictionary representation of the versioned executor object
"""
# note: we only save non-default property for the sake of clarity
_defaults = get_default_metas()
p = (
{
k: getattr(data.metas, k)
for k, v in _defaults.items()
if getattr(data.metas, k) != v
}
if hasattr(data, 'metas')
else {}
)
a = {k: v for k, v in data._init_kwargs_dict.items() if k not in _defaults}
r = {}
if a:
r['with'] = a
if p:
r['metas'] = p
if hasattr(data, 'requests'):
r['requests'] = {k: v.__name__ for k, v in data.requests.items()}
if hasattr(data, 'dynamic_batching'):
r['dynamic_batching'] = data.dynamic_batching
if hasattr(data, 'components'):
r['components'] = data.components
return r
|
import pytest
from docarray import DocumentArray
from docarray.array.opensearch import DocumentArrayOpenSearch, OpenSearchConfig
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
(DocumentArrayRedis, RedisConfig(n_dim=5)),
(DocumentArrayMilvus, MilvusConfig(n_dim=5)),
(DocumentArrayOpenSearch, OpenSearchConfig(n_dim=5)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
import pytest
from docarray import DocumentArray
from docarray.array.qdrant import DocumentArrayQdrant
from docarray.array.sqlite import DocumentArraySqlite
from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig
from docarray.array.storage.qdrant import QdrantConfig
from docarray.array.storage.weaviate import WeaviateConfig
from docarray.array.weaviate import DocumentArrayWeaviate
from docarray.array.elastic import DocumentArrayElastic, ElasticConfig
from docarray.array.redis import DocumentArrayRedis, RedisConfig
from docarray.array.milvus import DocumentArrayMilvus, MilvusConfig
@pytest.mark.parametrize(
'da_cls,config',
[
(DocumentArray, None),
(DocumentArraySqlite, None),
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)),
(DocumentArrayWeaviate, WeaviateConfig(n_dim=5)),
(DocumentArrayQdrant, QdrantConfig(n_dim=5)),
(DocumentArrayElastic, ElasticConfig(n_dim=5)),
(DocumentArrayRedis, RedisConfig(n_dim=5)),
(DocumentArrayMilvus, MilvusConfig(n_dim=5)),
],
)
def test_empty_non_zero(da_cls, config, start_storage):
# Assert .empty provides a da with 0 docs
if config:
da = da_cls.empty(config=config)
else:
da = da_cls.empty()
assert len(da) == 0
# Assert .empty provides a da of the correct length
if config:
da = da_cls.empty(10, config=config)
else:
da = da_cls.empty(10)
assert len(da) == 10
|
"""Test PremAIEmbeddings embeddings."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.embeddings import PremAIEmbeddings
@pytest.mark.requires("premai")
def test_api_key_is_string() -> None:
llm = PremAIEmbeddings( # type: ignore[call-arg]
premai_api_key="secret-api-key", # type: ignore[arg-type]
project_id=8,
model="fake-model",
)
assert isinstance(llm.premai_api_key, SecretStr)
@pytest.mark.requires("premai")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = PremAIEmbeddings( # type: ignore[call-arg]
premai_api_key="secret-api-key", # type: ignore[arg-type]
project_id=8,
model="fake-model",
)
print(llm.premai_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
"""Test PremAIEmbeddings embeddings."""
import pytest
from pydantic import SecretStr
from pytest import CaptureFixture
from langchain_community.embeddings import PremAIEmbeddings
@pytest.mark.requires("premai")
def test_api_key_is_string() -> None:
llm = PremAIEmbeddings( # type: ignore[call-arg]
premai_api_key="secret-api-key", # type: ignore[arg-type]
project_id=8,
model="fake-model", # type: ignore[arg-type]
)
assert isinstance(llm.premai_api_key, SecretStr)
@pytest.mark.requires("premai")
def test_api_key_masked_when_passed_via_constructor(
capsys: CaptureFixture,
) -> None:
llm = PremAIEmbeddings( # type: ignore[call-arg]
premai_api_key="secret-api-key", # type: ignore[arg-type]
project_id=8,
model="fake-model", # type: ignore[arg-type]
)
print(llm.premai_api_key, end="") # noqa: T201
captured = capsys.readouterr()
assert captured.out == "**********"
|
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NotFoundError(ValueError):
"""The requested record was not found, resulting in an error condition"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
class InsufficientBalanceError(ValueError):
user_id: str
message: str
balance: float
amount: float
def __init__(self, message: str, user_id: str, balance: float, amount: float):
super().__init__(message)
self.args = (message, user_id, balance, amount)
self.message = message
self.user_id = user_id
self.balance = balance
self.amount = amount
def __str__(self):
"""Used to display the error message in the frontend, because we str() the error when sending the execution update"""
return self.message
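# Usage sketch (hypothetical values): the message passed to the constructor is what
# the frontend sees, because the error is str()-ed before being sent.
if __name__ == "__main__":
    try:
        raise InsufficientBalanceError(
            message="Insufficient balance to run this block",
            user_id="user-123",  # hypothetical user id
            balance=1.5,
            amount=4.0,
        )
    except InsufficientBalanceError as err:
        print(str(err))  # prints: Insufficient balance to run this block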
|
class MissingConfigError(Exception):
"""The attempted operation requires configuration which is not available"""
class NeedConfirmation(Exception):
"""The user must explicitly confirm that they want to proceed"""
class InsufficientBalanceError(ValueError):
user_id: str
message: str
balance: float
amount: float
def __init__(self, message: str, user_id: str, balance: float, amount: float):
super().__init__(message)
self.args = (message, user_id, balance, amount)
self.message = message
self.user_id = user_id
self.balance = balance
self.amount = amount
def __str__(self):
"""Used to display the error message in the frontend, because we str() the error when sending the execution update"""
return self.message
|
from google.protobuf import __version__ as __pb__version__
from jina._docarray import docarray_v2 as is_docarray_v2
if __pb__version__.startswith('4'):
if is_docarray_v2:
from jina.proto.docarray_v2.pb.jina_pb2 import *
else:
from jina.proto.docarray_v1.pb.jina_pb2 import *
else:
if is_docarray_v2:
from jina.proto.docarray_v2.pb2.jina_pb2 import *
else:
from jina.proto.docarray_v1.pb2.jina_pb2 import *
|
from google.protobuf import __version__ as __pb__version__
from jina._docarray import docarray_v2 as is_docarray_v2
if __pb__version__.startswith('4'):
if is_docarray_v2:
from .docarray_v2.pb.jina_pb2 import *
else:
from .docarray_v1.pb.jina_pb2 import *
else:
if is_docarray_v2:
from .docarray_v2.pb2.jina_pb2 import *
else:
from .docarray_v1.pb2.jina_pb2 import *
|
import types
from typing import TYPE_CHECKING
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio.audio_jax_array import AudioJaxArray # noqa
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__ = ['AudioNdArray', 'AudioTensor', 'AudioJaxArray']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'AudioTorchTensor':
import_library('torch', raise_error=True)
import docarray.typing.tensor.audio.audio_torch_tensor as lib
elif name == 'AudioTensorFlowTensor':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.audio.audio_tensorflow_tensor as lib
elif name == 'AudioJaxArray':
import_library('jax', raise_error=True)
import docarray.typing.tensor.audio.audio_jax_array as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
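# Lazy-import sketch: accessing an optional tensor class triggers the module-level
# __getattr__ above (PEP 562), which imports the backend only when it is installed.
#
#   from docarray.typing.tensor.audio import AudioTorchTensor  # imports torch lazily
#
# If the backend (torch/tensorflow/jax) is missing, import_library raises an
# informative ImportError instead of failing at module import time.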
|
import types
from typing import TYPE_CHECKING
from docarray.typing.tensor.audio.audio_ndarray import AudioNdArray
from docarray.typing.tensor.audio.audio_tensor import AudioTensor
from docarray.utils._internal.misc import (
_get_path_from_docarray_root_level,
import_library,
)
if TYPE_CHECKING:
from docarray.typing.tensor.audio.audio_tensorflow_tensor import ( # noqa
AudioTensorFlowTensor,
)
from docarray.typing.tensor.audio.audio_torch_tensor import AudioTorchTensor # noqa
__all__ = ['AudioNdArray', 'AudioTensor']
def __getattr__(name: str):
lib: types.ModuleType
if name == 'AudioTorchTensor':
import_library('torch', raise_error=True)
import docarray.typing.tensor.audio.audio_torch_tensor as lib
elif name == 'AudioTensorFlowTensor':
import_library('tensorflow', raise_error=True)
import docarray.typing.tensor.audio.audio_tensorflow_tensor as lib
else:
raise ImportError(
f'cannot import name \'{name}\' from \'{_get_path_from_docarray_root_level(__file__)}\''
)
tensor_cls = getattr(lib, name)
if name not in __all__:
__all__.append(name)
return tensor_cls
|
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RAG."""
import os
import warnings
from typing import Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
def __init__(self, question_encoder, generator):
self.question_encoder = question_encoder
self.generator = generator
self.current_tokenizer = self.question_encoder
def save_pretrained(self, save_directory):
if os.path.isfile(save_directory):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
generator_path = os.path.join(save_directory, "generator_tokenizer")
self.question_encoder.save_pretrained(question_encoder_path)
self.generator.save_pretrained(generator_path)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop("config", None)
if config is None:
config = RagConfig.from_pretrained(pretrained_model_name_or_path)
question_encoder = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
)
generator = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
)
return cls(question_encoder=question_encoder, generator=generator)
def __call__(self, *args, **kwargs):
return self.current_tokenizer(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
return self.generator.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.generator.decode(*args, **kwargs)
def _switch_to_input_mode(self):
self.current_tokenizer = self.question_encoder
def _switch_to_target_mode(self):
self.current_tokenizer = self.generator
def prepare_seq2seq_batch(
self,
src_texts: list[str],
tgt_texts: Optional[list[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: Optional[str] = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details",
FutureWarning,
)
if max_length is None:
max_length = self.current_tokenizer.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = self.current_tokenizer.model_max_length
labels = self(
text_target=tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
__all__ = ["RagTokenizer"]
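# Usage sketch (assumes network access and torch installed; "facebook/rag-token-nq"
# is a public checkpoint, used here purely for illustration):
if __name__ == "__main__":
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
    print(inputs["input_ids"].shape)  # question-encoder tokenization by default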
|
# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for RAG."""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
def __init__(self, question_encoder, generator):
self.question_encoder = question_encoder
self.generator = generator
self.current_tokenizer = self.question_encoder
def save_pretrained(self, save_directory):
if os.path.isfile(save_directory):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
generator_path = os.path.join(save_directory, "generator_tokenizer")
self.question_encoder.save_pretrained(question_encoder_path)
self.generator.save_pretrained(generator_path)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
config = kwargs.pop("config", None)
if config is None:
config = RagConfig.from_pretrained(pretrained_model_name_or_path)
question_encoder = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
)
generator = AutoTokenizer.from_pretrained(
pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
)
return cls(question_encoder=question_encoder, generator=generator)
def __call__(self, *args, **kwargs):
return self.current_tokenizer(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
return self.generator.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.generator.decode(*args, **kwargs)
def _switch_to_input_mode(self):
self.current_tokenizer = self.question_encoder
def _switch_to_target_mode(self):
self.current_tokenizer = self.generator
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: Optional[str] = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details",
FutureWarning,
)
if max_length is None:
max_length = self.current_tokenizer.model_max_length
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = self.current_tokenizer.model_max_length
labels = self(
text_target=tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
__all__ = ["RagTokenizer"]
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict, Iterable, Optional
import torch
from jina import DocumentArray, Executor, requests
from jina_commons.batching import get_docs_batch_generator
from sentence_transformers import SentenceTransformer
class TransformerSentenceEncoder(Executor):
"""
    Encode the Document text into an embedding.
    :param model_name: name of the sentence-transformers model to load
    :param device: device to run the model on, e.g. 'cpu' or 'cuda'
    :param default_traversal_paths: default traversal paths over which to apply the encoding
    :param default_batch_size: default batch size used when encoding
"""
def __init__(
self,
model_name: str = 'all-MiniLM-L6-v2',
device: str = 'cpu',
default_traversal_paths: Iterable[str] = ('r',),
default_batch_size: int = 32,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
Encode all docs with text and store the encodings in the ``embedding`` attribute
of the docs.
:param docs: Documents to send to the encoder. They need to have the ``text``
            attribute to get an embedding.
:param parameters: Any additional parameters for the `encode` function.
"""
for batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get(
'traversal_paths', self.default_traversal_paths
),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text',
):
texts = batch.get_attributes('text')
with torch.no_grad():
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
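# Usage sketch (an assumption, not part of the original executor): encode a small
# DocumentArray locally; the default model is downloaded on first use.
if __name__ == "__main__":
    from jina import Document, DocumentArray

    encoder = TransformerSentenceEncoder()
    docs = DocumentArray([Document(text='hello world'), Document(text='goodbye')])
    encoder.encode(docs, parameters={})
    print(docs[0].embedding.shape)  # e.g. (384,) for all-MiniLM-L6-v2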
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Optional, Dict, List, Tuple
from jina import Executor, DocumentArray, requests
from sentence_transformers import SentenceTransformer
from jina_commons.batching import get_docs_batch_generator
class TransformerSentenceEncoder(Executor):
"""
    Encode the Document text into an embedding.
    :param model_name: name of the sentence-transformers model to load
    :param device: device to run the model on, e.g. 'cpu' or 'cuda'
    :param default_traversal_paths: default traversal paths over which to apply the encoding
    :param default_batch_size: default batch size used when encoding
"""
def __init__(
self,
model_name: str = 'sentence-transformers/paraphrase-mpnet-base-v2',
device: str = "cpu",
default_traversal_paths: Tuple = ('r', ),
default_batch_size=32,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.default_batch_size = default_batch_size
self.default_traversal_paths = default_traversal_paths
self.model = SentenceTransformer(model_name, device=device)
@requests
def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):
"""
        Encode all docs with text and store the encodings in the embedding attribute of the docs.
        :param docs: documents sent to the encoder. The docs must have the `text` attribute set.
:param parameters: Any additional parameters for the `encode` function.
"""
for batch in get_docs_batch_generator(
docs,
traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),
batch_size=parameters.get('batch_size', self.default_batch_size),
needs_attr='text'
):
texts = batch.get_attributes("text")
embeddings = self.model.encode(texts)
for doc, embedding in zip(batch, embeddings):
doc.embedding = embedding
|
# flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras import layers
import keras
import numpy as np
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 64
num_epochs = 1
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create the Keras model
model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch + 1}/{num_epochs}], "
f"Batch [{batch_idx + 1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
train(model, train_loader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
torch_module = MyModel()
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(torch_module, train_loader, num_epochs, optimizer, loss_fn)
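################################################################
######## Optional: evaluating the torch-wrapped model ##########
################################################################
# A small evaluation sketch (an assumption, not part of the original example):
# score the held-out test split with the torch module trained above.
test_loader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(torch.from_numpy(x_test), torch.from_numpy(y_test)),
    batch_size=batch_size,
)
correct = 0
with torch.no_grad():
    for inputs, targets in test_loader:
        # The final Dense layer outputs logits, so argmax gives the predicted class
        preds = torch_module(inputs).argmax(dim=1)
        correct += (preds == targets.long()).sum().item()
print(f"Test accuracy: {correct / len(x_test):.4f}")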
|
# flake8: noqa
import os
# Set backend env to torch
os.environ["KERAS_BACKEND"] = "torch"
import torch
import torch.nn as nn
import torch.optim as optim
from keras import layers
import keras
import numpy as np
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
learning_rate = 0.01
batch_size = 64
num_epochs = 1
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# Create the Keras model
model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
#################################################################
######## Writing a torch training loop for a Keras model ########
#################################################################
# Instantiate the torch optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
def train(model, train_loader, num_epochs, optimizer, loss_fn):
for epoch in range(num_epochs):
running_loss = 0.0
for batch_idx, (inputs, targets) in enumerate(train_loader):
# Forward pass
outputs = model(inputs)
loss = loss_fn(outputs, targets)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
running_loss += loss.item()
# Print loss statistics
if (batch_idx + 1) % 10 == 0:
print(
f"Epoch [{epoch+1}/{num_epochs}], "
f"Batch [{batch_idx+1}/{len(train_loader)}], "
f"Loss: {running_loss / 10}"
)
running_loss = 0.0
# Create a TensorDataset
dataset = torch.utils.data.TensorDataset(
torch.from_numpy(x_train), torch.from_numpy(y_train)
)
# Create a DataLoader
train_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
train(model, train_loader, num_epochs, optimizer, loss_fn)
################################################################
######## Using a Keras model or layer in a torch Module ########
################################################################
class MyModel(nn.Module):
def __init__(self):
super().__init__()
self.model = keras.Sequential(
[
layers.Input(shape=(28, 28, 1)),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes),
]
)
def forward(self, x):
return self.model(x)
torch_module = MyModel()
# Instantiate the torch optimizer
optimizer = optim.Adam(torch_module.parameters(), lr=learning_rate)
# Instantiate the torch loss function
loss_fn = nn.CrossEntropyLoss()
train(torch_module, train_loader, num_epochs, optimizer, loss_fn)
|
def _python_type_to_schema_type(p):
if p == 'str':
dtype = 'string'
elif p == 'int' or p == 'float':
dtype = 'number'
elif p in {'typing.List[str]', 'typing.Tuple[str]', 'list', 'tuple'}:
dtype = 'array'
elif p == 'bool':
dtype = 'boolean'
elif p == 'dict':
dtype = 'object'
else:
dtype = 'null'
# raise TypeError(f'{p} is not supported')
return dtype
def _cli_to_schema(
api_dict,
target,
extras=None,
required=None,
allow_addition=False,
namespace='Jina',
description='',
):
deployment_api = []
if not isinstance(target, list):
target = [target]
for d in api_dict['methods']:
if d['name'] in target:
deployment_api.extend(d['options'])
_schema = {
'properties': {},
'type': 'object',
'required': [],
'additionalProperties': allow_addition,
'description': description,
}
for d in deployment_api:
dtype = _python_type_to_schema_type(d['type'])
pv = {'description': d['help'].strip(), 'type': dtype, 'default': d['default']}
if d['choices']:
pv['enum'] = d['choices']
if d['required']:
_schema['required'].append(d['name'])
if dtype == 'array':
_schema['items'] = {'type': 'string', 'minItems': 1, 'uniqueItems': True}
_schema['properties'][d['name']] = pv
if extras:
_schema['properties'].update(extras)
if required:
_schema['required'].extend(required)
return {f'{namespace}::{target[0].capitalize()}': _schema}
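# Minimal sketch of the expected input/output (hypothetical subset of a CLI dump):
if __name__ == "__main__":
    api = {
        'methods': [
            {
                'name': 'deployment',
                'options': [
                    {
                        'name': 'port',
                        'type': 'int',
                        'help': 'The port for the deployment',
                        'choices': None,
                        'default': 12345,
                        'required': False,
                    }
                ],
            }
        ]
    }
    schema = _cli_to_schema(api, 'deployment')
    # -> {'Jina::Deployment': {..., 'properties': {'port': {'type': 'number', ...}}}}
    print(schema)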
|
def _python_type_to_schema_type(p):
if p == 'str':
dtype = 'string'
elif p == 'int' or p == 'float':
dtype = 'number'
elif p in {'typing.List[str]', 'typing.Tuple[str]', 'list', 'tuple'}:
dtype = 'array'
elif p == 'bool':
dtype = 'boolean'
elif p == 'dict':
dtype = 'object'
else:
dtype = None
# raise TypeError(f'{p} is not supported')
return dtype
def _cli_to_schema(
api_dict,
target,
extras=None,
required=None,
allow_addition=False,
namespace='Jina',
description='',
):
deployment_api = []
if not isinstance(target, list):
target = [target]
for d in api_dict['methods']:
if d['name'] in target:
deployment_api.extend(d['options'])
_schema = {
'properties': {},
'type': 'object',
'required': [],
'additionalProperties': allow_addition,
'description': description,
}
for d in deployment_api:
dtype = _python_type_to_schema_type(d['type'])
pv = {'description': d['help'].strip(), 'type': dtype, 'default': d['default']}
if d['choices']:
pv['enum'] = d['choices']
if d['required']:
_schema['required'].append(d['name'])
if dtype == 'array':
_schema['items'] = {'type': 'string', 'minItems': 1, 'uniqueItems': True}
_schema['properties'][d['name']] = pv
if extras:
_schema['properties'].update(extras)
if required:
_schema['required'].extend(required)
return {f'{namespace}::{target[0].capitalize()}': _schema}
|
import logging
from backend.data import db
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
create_graph_execution,
get_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
update_node_execution_status,
update_node_execution_status_batch,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
logger = logging.getLogger(__name__)
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
async def _get_credits(user_id: str) -> int:
return await _user_credit_model.get_credits(user_id)
class DatabaseManager(AppService):
def run_service(self) -> None:
logger.info(f"[{self.service_name}] ⏳ Connecting to Database...")
self.run_and_wait(db.connect())
super().run_service()
def cleanup(self):
super().cleanup()
logger.info(f"[{self.service_name}] ⏳ Disconnecting Database...")
self.run_and_wait(db.disconnect())
@classmethod
def get_port(cls) -> int:
return config.database_api_port
# Executions
get_graph_execution = exposed_run_and_wait(get_graph_execution)
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_node_execution_results = exposed_run_and_wait(get_node_execution_results)
get_incomplete_node_executions = exposed_run_and_wait(
get_incomplete_node_executions
)
get_latest_node_execution = exposed_run_and_wait(get_latest_node_execution)
update_node_execution_status = exposed_run_and_wait(update_node_execution_status)
update_node_execution_status_batch = exposed_run_and_wait(
update_node_execution_status_batch
)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
get_credits = exposed_run_and_wait(_get_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
import logging
from backend.data import db
from backend.data.credit import UsageTransactionMetadata, get_user_credit_model
from backend.data.execution import (
create_graph_execution,
get_graph_execution,
get_incomplete_node_executions,
get_latest_node_execution,
get_node_execution_results,
update_graph_execution_start_time,
update_graph_execution_stats,
update_node_execution_stats,
update_node_execution_status,
update_node_execution_status_batch,
upsert_execution_input,
upsert_execution_output,
)
from backend.data.graph import (
get_connected_output_nodes,
get_graph,
get_graph_metadata,
get_node,
)
from backend.data.notifications import (
create_or_add_to_user_notification_batch,
empty_user_notification_batch,
get_all_batches_by_type,
get_user_notification_batch,
get_user_notification_oldest_message_in_batch,
)
from backend.data.user import (
get_active_user_ids_in_timerange,
get_user_email_by_id,
get_user_email_verification,
get_user_integrations,
get_user_metadata,
get_user_notification_preference,
update_user_integrations,
update_user_metadata,
)
from backend.util.service import AppService, exposed_run_and_wait
from backend.util.settings import Config
config = Config()
_user_credit_model = get_user_credit_model()
logger = logging.getLogger(__name__)
async def _spend_credits(
user_id: str, cost: int, metadata: UsageTransactionMetadata
) -> int:
return await _user_credit_model.spend_credits(user_id, cost, metadata)
class DatabaseManager(AppService):
def run_service(self) -> None:
logger.info(f"[{self.service_name}] ⏳ Connecting to Database...")
self.run_and_wait(db.connect())
super().run_service()
def cleanup(self):
super().cleanup()
logger.info(f"[{self.service_name}] ⏳ Disconnecting Database...")
self.run_and_wait(db.disconnect())
@classmethod
def get_port(cls) -> int:
return config.database_api_port
# Executions
get_graph_execution = exposed_run_and_wait(get_graph_execution)
create_graph_execution = exposed_run_and_wait(create_graph_execution)
get_node_execution_results = exposed_run_and_wait(get_node_execution_results)
get_incomplete_node_executions = exposed_run_and_wait(
get_incomplete_node_executions
)
get_latest_node_execution = exposed_run_and_wait(get_latest_node_execution)
update_node_execution_status = exposed_run_and_wait(update_node_execution_status)
update_node_execution_status_batch = exposed_run_and_wait(
update_node_execution_status_batch
)
update_graph_execution_start_time = exposed_run_and_wait(
update_graph_execution_start_time
)
update_graph_execution_stats = exposed_run_and_wait(update_graph_execution_stats)
update_node_execution_stats = exposed_run_and_wait(update_node_execution_stats)
upsert_execution_input = exposed_run_and_wait(upsert_execution_input)
upsert_execution_output = exposed_run_and_wait(upsert_execution_output)
# Graphs
get_node = exposed_run_and_wait(get_node)
get_graph = exposed_run_and_wait(get_graph)
get_connected_output_nodes = exposed_run_and_wait(get_connected_output_nodes)
get_graph_metadata = exposed_run_and_wait(get_graph_metadata)
# Credits
spend_credits = exposed_run_and_wait(_spend_credits)
# User + User Metadata + User Integrations
get_user_metadata = exposed_run_and_wait(get_user_metadata)
update_user_metadata = exposed_run_and_wait(update_user_metadata)
get_user_integrations = exposed_run_and_wait(get_user_integrations)
update_user_integrations = exposed_run_and_wait(update_user_integrations)
# User Comms - async
get_active_user_ids_in_timerange = exposed_run_and_wait(
get_active_user_ids_in_timerange
)
get_user_email_by_id = exposed_run_and_wait(get_user_email_by_id)
get_user_email_verification = exposed_run_and_wait(get_user_email_verification)
get_user_notification_preference = exposed_run_and_wait(
get_user_notification_preference
)
# Notifications - async
create_or_add_to_user_notification_batch = exposed_run_and_wait(
create_or_add_to_user_notification_batch
)
empty_user_notification_batch = exposed_run_and_wait(empty_user_notification_batch)
get_all_batches_by_type = exposed_run_and_wait(get_all_batches_by_type)
get_user_notification_batch = exposed_run_and_wait(get_user_notification_batch)
get_user_notification_oldest_message_in_batch = exposed_run_and_wait(
get_user_notification_oldest_message_in_batch
)
|
import os
from typing import Any, Optional, Dict
from llama_index.llms.openai_like import OpenAILike
class Databricks(OpenAILike):
"""
Databricks LLM.
Examples:
`pip install llama-index-llms-databricks`
```python
from llama_index.llms.databricks import Databricks
# Set up the Databricks class with the required model, API key and serving endpoint
llm = Databricks(model="databricks-dbrx-instruct", api_key="your_api_key", api_base="https://[your-work-space].cloud.databricks.com/serving-endpoints")
# Call the complete method with a query
response = llm.complete("Explain the importance of open source LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("DATABRICKS_TOKEN", None)
api_base = api_base or os.environ.get("DATABRICKS_SERVING_ENDPOINT", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Databricks"
def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
"""Get the kwargs that need to be provided to the model invocation."""
# Fix the input to work with the Databricks API
if "tool_choice" in kwargs and "tools" not in kwargs:
del kwargs["tool_choice"]
return super()._get_model_kwargs(**kwargs)
|
import os
from typing import Any, Optional, Dict
from llama_index.llms.openai_like import OpenAILike
class Databricks(OpenAILike):
"""Databricks LLM.
Examples:
`pip install llama-index-llms-databricks`
```python
from llama_index.llms.databricks import Databricks
# Set up the Databricks class with the required model, API key and serving endpoint
llm = Databricks(model="databricks-dbrx-instruct", api_key="your_api_key", api_base="https://[your-work-space].cloud.databricks.com/serving-endpoints")
# Call the complete method with a query
response = llm.complete("Explain the importance of open source LLMs")
print(response)
```
"""
def __init__(
self,
model: str,
api_key: Optional[str] = None,
api_base: Optional[str] = None,
is_chat_model: bool = True,
**kwargs: Any,
) -> None:
api_key = api_key or os.environ.get("DATABRICKS_TOKEN", None)
api_base = api_base or os.environ.get("DATABRICKS_SERVING_ENDPOINT", None)
super().__init__(
model=model,
api_key=api_key,
api_base=api_base,
is_chat_model=is_chat_model,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Databricks"
def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
"""Get the kwargs that need to be provided to the model invocation."""
# Fix the input to work with the Databricks API
if "tool_choice" in kwargs and "tools" not in kwargs:
del kwargs["tool_choice"]
return super()._get_model_kwargs(**kwargs)
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/main/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
def test_load_bytes():
uri = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
audio_bytes = uri.load_bytes()
assert isinstance(audio_bytes, bytes)
assert isinstance(audio_bytes, AudioBytes)
assert len(audio_bytes) > 0
|
from typing import Optional
import numpy as np
import pytest
import torch
from pydantic.tools import parse_obj_as, schema_json_of
from docarray import BaseDoc
from docarray.base_doc.io.json import orjson_dumps
from docarray.typing import AudioBytes, AudioTorchTensor, AudioUrl
from docarray.utils._internal.misc import is_tf_available
from tests import TOYDATA_DIR
tf_available = is_tf_available()
if tf_available:
import tensorflow as tf
from docarray.typing.tensor.audio import AudioTensorFlowTensor
AUDIO_FILES = [
str(TOYDATA_DIR / 'hello.wav'),
str(TOYDATA_DIR / 'olleh.wav'),
]
REMOTE_AUDIO_FILE = 'https://github.com/docarray/docarray/blob/feat-rewrite-v2/tests/toydata/olleh.wav?raw=true' # noqa: E501
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
tensor, _ = uri.load()
assert isinstance(tensor, np.ndarray)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_torch_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTorchTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, torch.Tensor)
assert isinstance(doc.tensor, AudioTorchTensor)
@pytest.mark.tensorflow
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load_audio_url_to_audio_tensorflow_tensor_field(file_url):
class MyAudioDoc(BaseDoc):
audio_url: AudioUrl
tensor: Optional[AudioTensorFlowTensor]
doc = MyAudioDoc(audio_url=file_url)
doc.tensor, _ = doc.audio_url.load()
assert isinstance(doc.tensor, AudioTensorFlowTensor)
assert isinstance(doc.tensor.tensor, tf.Tensor)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_load(file_url):
url = parse_obj_as(AudioUrl, file_url)
tensor, _ = url.load()
assert isinstance(tensor, np.ndarray)
def test_json_schema():
schema_json_of(AudioUrl)
def test_dump_json():
url = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'path_to_file',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_validation(path_to_file):
url = parse_obj_as(AudioUrl, path_to_file)
assert isinstance(url, AudioUrl)
assert isinstance(url, str)
@pytest.mark.proto
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_url',
[*AUDIO_FILES, REMOTE_AUDIO_FILE],
)
def test_proto_audio_url(file_url):
uri = parse_obj_as(AudioUrl, file_url)
proto = uri._to_node_protobuf()
assert 'audio_url' in str(proto)
def test_load_bytes():
uri = parse_obj_as(AudioUrl, REMOTE_AUDIO_FILE)
audio_bytes = uri.load_bytes()
assert isinstance(audio_bytes, bytes)
assert isinstance(audio_bytes, AudioBytes)
assert len(audio_bytes) > 0
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='RTMDetInsSepBNHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
share_conv=True,
pred_kernel_size=1,
feat_channels=256,
act_cfg=dict(type='SiLU', inplace=True),
norm_cfg=dict(type='SyncBN', requires_grad=True),
anchor_generator=dict(
type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type='DistancePointBBoxCoder'),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_mask=dict(
type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100,
mask_thr_binary=0.5),
)
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline))
train_pipeline_stage2 = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
val_evaluator = dict(metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
_base_ = './rtmdet_l_8xb32-300e_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='RTMDetInsSepBNHead',
num_classes=80,
in_channels=256,
stacked_convs=2,
share_conv=True,
pred_kernel_size=1,
feat_channels=256,
act_cfg=dict(type='SiLU', inplace=True),
norm_cfg=dict(type='SyncBN', requires_grad=True),
anchor_generator=dict(
type='MlvlPointGenerator', offset=0, strides=[8, 16, 32]),
bbox_coder=dict(type='DistancePointBBoxCoder'),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
loss_mask=dict(
type='DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100,
mask_thr_binary=0.5),
)
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='CachedMosaic', img_scale=(640, 640), pad_val=114.0),
dict(
type='RandomResize',
scale=(1280, 1280),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(
type='CachedMixUp',
img_scale=(640, 640),
ratio_range=(1.0, 1.0),
max_cached_images=20,
pad_val=(114, 114, 114)),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='PackDetInputs')
]
train_dataloader = dict(pin_memory=True, dataset=dict(pipeline=train_pipeline))
train_pipeline_stage2 = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='RandomResize',
scale=(640, 640),
ratio_range=(0.1, 2.0),
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=(640, 640),
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1)),
dict(type='YOLOXHSVRandomAug'),
dict(type='RandomFlip', prob=0.5),
dict(type='Pad', size=(640, 640), pad_val=dict(img=(114, 114, 114))),
dict(type='PackDetInputs')
]
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='PipelineSwitchHook',
switch_epoch=280,
switch_pipeline=train_pipeline_stage2)
]
val_evaluator = dict(metric=['bbox', 'segm'])
test_evaluator = val_evaluator
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PaiEasChatEndpoint": "langchain_community.chat_models.pai_eas_endpoint",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PaiEasChatEndpoint",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_models.pai_eas_endpoint import PaiEasChatEndpoint
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PaiEasChatEndpoint": "langchain_community.chat_models.pai_eas_endpoint"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PaiEasChatEndpoint",
]
|
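The two shim modules above rely on module-level __getattr__ (PEP 562) to forward a deprecated name to its new package. The snippet below is a minimal, self-contained sketch of that pattern, not langchain's actual create_importer implementation:

import importlib
import warnings
from typing import Any

_DEPRECATED_LOOKUP = {
    "PaiEasChatEndpoint": "langchain_community.chat_models.pai_eas_endpoint",
}

def __getattr__(name: str) -> Any:
    """Forward deprecated attribute access to the new module with a warning."""
    if name in _DEPRECATED_LOOKUP:
        new_module = _DEPRECATED_LOOKUP[name]
        warnings.warn(
            f"Importing {name} from here is deprecated; import it from {new_module} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(importlib.import_module(new_module), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")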
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self, arg1: str = None, arg2: str = None, arg3: str = 'default-arg3', **kwargs
):
super().__init__(**kwargs)
self.port = self.runtime_args.port[0]
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
|
from typing import Optional
from docarray import Document, DocumentArray
from pydantic import BaseModel
from uvicorn import Config, Server
from jina import Gateway, __default_host__
from jina.clients.request import request_generator
class DummyResponseModel(BaseModel):
arg1: Optional[str]
arg2: Optional[str]
arg3: Optional[str]
class ProcessedResponseModel(BaseModel):
text: str
tags: Optional[dict]
class DummyGateway(Gateway):
def __init__(
self,
port: int = None,
arg1: str = None,
arg2: str = None,
arg3: str = 'default-arg3',
**kwargs
):
super().__init__(**kwargs)
self.port = port
self.arg1 = arg1
self.arg2 = arg2
self.arg3 = arg3
async def setup_server(self):
from fastapi import FastAPI
app = FastAPI(
title='Dummy Server',
)
@app.get(path='/', response_model=DummyResponseModel)
def _get_response():
return {
'arg1': self.arg1,
'arg2': self.arg2,
'arg3': self.arg3,
}
@app.get(
path='/stream',
response_model=ProcessedResponseModel,
)
async def _process(text: str):
doc = None
async for req in self.streamer.stream(
request_generator(
exec_endpoint='/',
data=DocumentArray([Document(text=text)]),
)
):
doc = req.to_dict()['data'][0]
return {'text': doc['text'], 'tags': doc['tags']}
self.server = Server(Config(app, host=__default_host__, port=self.port))
async def run_server(self):
await self.server.serve()
async def teardown(self):
await super().teardown()
await self.server.shutdown()
async def stop_server(self):
self.server.should_exit = True
|
_base_ = './fast-rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')))
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
detectorrs_cfg = dict(
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True)
"""Test init_weights config"""
with pytest.raises(AssertionError):
        # pretrained and init_cfg cannot be set at the same time
DetectoRS_ResNet(
**detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')
with pytest.raises(AssertionError):
# init_cfg must be a dict
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])
with pytest.raises(KeyError):
# init_cfg must contain the key `type`
DetectoRS_ResNet(
**detectorrs_cfg,
pretrained=None,
init_cfg=dict(checkpoint='Pretrained'))
with pytest.raises(AssertionError):
        # init_cfg only supports initialization from a pretrained model
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))
with pytest.raises(TypeError):
        # pretrained must be a str or None
model = DetectoRS_ResNet(
**detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
model.init_weights()
|
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
detectorrs_cfg = dict(
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True)
"""Test init_weights config"""
with pytest.raises(AssertionError):
        # pretrained and init_cfg cannot be set at the same time
DetectoRS_ResNet(
**detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')
with pytest.raises(AssertionError):
# init_cfg must be a dict
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])
with pytest.raises(KeyError):
# init_cfg must contain the key `type`
DetectoRS_ResNet(
**detectorrs_cfg,
pretrained=None,
init_cfg=dict(checkpoint='Pretrained'))
with pytest.raises(AssertionError):
        # init_cfg only supports initialization from a pretrained model
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))
with pytest.raises(TypeError):
        # pretrained must be a str or None
model = DetectoRS_ResNet(
**detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
model.init_weights()
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity: Pearson: 0.8430 Spearman: 0.8368
Model Sparsity: Active Dimensions: 81.1, Sparsity Ratio: 0.9973
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
import logging
from datasets import load_dataset
from sentence_transformers import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import SparseEmbeddingSimilarityEvaluator
logging.basicConfig(format="%(message)s", level=logging.INFO)
# Load a model
model = SparseEncoder("naver/splade-cocondenser-ensembledistil")
model.similarity_fn_name = "cosine" # even though the model is trained with dot, we need to set it to cosine for evaluation as the score in the dataset is cosine similarity
# Load the STSB dataset (https://huggingface.co/datasets/sentence-transformers/stsb)
eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
# Initialize the evaluator
dev_evaluator = SparseEmbeddingSimilarityEvaluator(
sentences1=eval_dataset["sentence1"],
sentences2=eval_dataset["sentence2"],
scores=eval_dataset["score"],
name="sts_dev",
)
results = dev_evaluator(model)
"""
EmbeddingSimilarityEvaluator: Evaluating the model on the sts_dev dataset:
Cosine-Similarity : Pearson: 0.8430 Spearman: 0.8368
Model Sparsity Stats: Row Non-Zero Mean: 81.0629997253418, Row Sparsity Mean: 0.997344046831131
"""
# Print the results
print(f"Primary metric: {dev_evaluator.primary_metric}")
# => Primary metric: sts_dev_spearman_cosine
print(f"Primary metric value: {results[dev_evaluator.primary_metric]:.4f}")
# => Primary metric value: 0.8368
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=128,
per_device_eval_batch_size=128,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=200,
save_strategy="steps",
save_steps=200,
learning_rate=4e-5,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
from __future__ import annotations
import logging
from datasets import load_dataset
from sentence_transformers.evaluation import SequentialEvaluator, SimilarityFunction
from sentence_transformers.models import Pooling, Transformer
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.sparse_encoder.evaluation import (
SparseEmbeddingSimilarityEvaluator,
)
from sentence_transformers.sparse_encoder.losses import CSRLoss
from sentence_transformers.sparse_encoder.models import CSRSparsity
from sentence_transformers.sparse_encoder.trainer import SparseEncoderTrainer
from sentence_transformers.sparse_encoder.training_args import (
SparseEncoderTrainingArguments,
)
from sentence_transformers.training_args import BatchSamplers
# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
# Initialize model components
model_name = "tomaarsen/mpnet-base-nli"
transformer = Transformer(model_name)
transformer.requires_grad_(False) # Freeze the transformer model
pooling = Pooling(transformer.get_word_embedding_dimension(), pooling_mode="mean")
csr_sparsity = CSRSparsity(
input_dim=transformer.get_word_embedding_dimension(),
hidden_dim=4 * transformer.get_word_embedding_dimension(),
k=256, # Number of top values to keep
k_aux=512, # Number of top values for auxiliary loss
)
# Create the SparseEncoder model
model = SparseEncoder(modules=[transformer, pooling, csr_sparsity])
output_dir = "examples/sparse_encoder/output/sparse_encoder_nli_frozen_transformer_from_pretrained"
# 2. Load the AllNLI dataset: https://huggingface.co/datasets/sentence-transformers/all-nli
train_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="train")
eval_dataset = load_dataset("sentence-transformers/all-nli", "triplet", split="dev")
logging.info(train_dataset)
# 3. Initialize the loss
loss = CSRLoss(
model=model,
beta=0.1, # Weight for auxiliary loss
gamma=1, # Weight for ranking loss
scale=20.0, # Scale for similarity computation
)
# 4. Define an evaluator for use during training. This is useful to keep track of alongside the evaluation loss.
stsb_eval_dataset = load_dataset("sentence-transformers/stsb", split="validation")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=stsb_eval_dataset["sentence1"],
sentences2=stsb_eval_dataset["sentence2"],
scores=stsb_eval_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-dev-{k_dim}",
truncate_dim=k_dim,
)
)
dev_evaluator = SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
# Set up training arguments
training_args = SparseEncoderTrainingArguments(
output_dir=output_dir,
num_train_epochs=1,
per_device_train_batch_size=16,
per_device_eval_batch_size=16,
warmup_ratio=0.1,
fp16=True, # Set to False if you get an error that your GPU can't run on FP16
bf16=False, # Set to True if you have a GPU that supports BF16
batch_sampler=BatchSamplers.NO_DUPLICATES, # MultipleNegativesRankingLoss benefits from no duplicate samples in a batch
logging_steps=100,
eval_strategy="steps",
eval_steps=1600,
save_strategy="steps",
save_steps=1600,
learning_rate=4e-6,
optim="adamw_torch",
weight_decay=1e-4,
adam_epsilon=6.25e-10,
run_name="sparse_encoder_nli_frozen_transformer_from_pretrained",
)
# Initialize trainer
trainer = SparseEncoderTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
loss=loss,
evaluator=dev_evaluator,
)
# Train model
trainer.train()
# 7. Evaluate the model performance on the STS Benchmark test dataset
test_dataset = load_dataset("sentence-transformers/stsb", split="test")
evaluators = []
for k_dim in [16, 32, 64, 128, 256]:
evaluators.append(
SparseEmbeddingSimilarityEvaluator(
sentences1=test_dataset["sentence1"],
sentences2=test_dataset["sentence2"],
scores=test_dataset["score"],
main_similarity=SimilarityFunction.COSINE,
name=f"sts-test-{k_dim}",
truncate_dim=k_dim,
)
)
test_evaluator = SequentialEvaluator(evaluators)
test_evaluator(model)
# 8. Save the trained & evaluated model locally
model.save(output_dir)
if __name__ == "__main__":
main()
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
# Copyright (c) OpenMMLab. All rights reserved.
__version__ = '3.0.0rc6'
short_version = __version__
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
|
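A quick illustration of the version parsing implemented above (assuming parse_version_info is in scope); both release and release-candidate strings parse as documented:

assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
assert parse_version_info('3.0.0rc6') == (3, 0, 0, 'rc6')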
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import (
SparseEncoder,
SparseEncoderModelCardData,
SparseEncoderTrainer,
SparseEncoderTrainingArguments,
)
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"SparseEncoder",
"SparseEncoderTrainer",
"SparseEncoderTrainingArguments",
"SparseEncoderModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
]
|
from __future__ import annotations
__version__ = "4.1.0.dev0"
__MODEL_HUB_ORGANIZATION__ = "sentence-transformers"
import importlib
import os
from sentence_transformers.backend import (
export_dynamic_quantized_onnx_model,
export_optimized_onnx_model,
export_static_quantized_openvino_model,
)
from sentence_transformers.cross_encoder import (
CrossEncoder,
CrossEncoderModelCardData,
CrossEncoderTrainer,
CrossEncoderTrainingArguments,
)
from sentence_transformers.datasets import ParallelSentencesDataset, SentencesDataset
from sentence_transformers.LoggingHandler import LoggingHandler
from sentence_transformers.model_card import SentenceTransformerModelCardData
from sentence_transformers.quantization import quantize_embeddings
from sentence_transformers.readers import InputExample
from sentence_transformers.SentenceTransformer import SentenceTransformer
from sentence_transformers.similarity_functions import SimilarityFunction
from sentence_transformers.sparse_encoder import SparseEncoder
from sentence_transformers.trainer import SentenceTransformerTrainer
from sentence_transformers.training_args import SentenceTransformerTrainingArguments
from sentence_transformers.util import mine_hard_negatives
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
__all__ = [
"LoggingHandler",
"SentencesDataset",
"ParallelSentencesDataset",
"SentenceTransformer",
"SimilarityFunction",
"InputExample",
"CrossEncoder",
"CrossEncoderTrainer",
"CrossEncoderTrainingArguments",
"CrossEncoderModelCardData",
"SentenceTransformerTrainer",
"SentenceTransformerTrainingArguments",
"SentenceTransformerModelCardData",
"quantize_embeddings",
"export_optimized_onnx_model",
"export_dynamic_quantized_onnx_model",
"export_static_quantized_openvino_model",
"mine_hard_negatives",
"SparseEncoder",
]
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
:param model: SentenceTransformer model
        :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.ContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
@property
def citation(self) -> str:
return """
@inproceedings{hadsell2006dimensionality,
author={Hadsell, R. and Chopra, S. and LeCun, Y.},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
title={Dimensionality Reduction by Learning an Invariant Mapping},
year={2006},
volume={2},
number={},
pages={1735-1742},
doi={10.1109/CVPR.2006.100}
}
"""
|
from enum import Enum
from typing import Iterable, Dict
import torch.nn.functional as F
from torch import nn, Tensor
from sentence_transformers.SentenceTransformer import SentenceTransformer
class SiameseDistanceMetric(Enum):
"""
The metric for the contrastive loss
"""
EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)
MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)
COSINE_DISTANCE = lambda x, y: 1 - F.cosine_similarity(x, y)
class ContrastiveLoss(nn.Module):
def __init__(
self,
model: SentenceTransformer,
distance_metric=SiameseDistanceMetric.COSINE_DISTANCE,
margin: float = 0.5,
size_average: bool = True,
):
"""
Contrastive loss. Expects as input two texts and a label of either 0 or 1. If the label == 1, then the distance between the
two embeddings is reduced. If the label == 0, then the distance between the embeddings is increased.
:param model: SentenceTransformer model
        :param distance_metric: Function that returns a distance between two embeddings. The class SiameseDistanceMetric contains pre-defined metrics that can be used
:param margin: Negative samples (label == 0) should have a distance of at least the margin value.
:param size_average: Average by the size of the mini-batch.
References:
* Further information: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
* `Training Examples > Quora Duplicate Questions <../../examples/training/quora_duplicate_questions/README.html>`_
Requirements:
1. (anchor, positive/negative) pairs
Relations:
- :class:`OnlineContrastiveLoss` is similar, but uses hard positive and hard negative pairs.
It often yields better results.
Inputs:
+-----------------------------------------------+------------------------------+
| Texts | Labels |
+===============================================+==============================+
| (anchor, positive/negative) pairs | 1 if positive, 0 if negative |
+-----------------------------------------------+------------------------------+
Example:
::
from sentence_transformers import SentenceTransformer, losses
from sentence_transformers.readers import InputExample
from torch.utils.data import DataLoader
model = SentenceTransformer('all-MiniLM-L6-v2')
train_examples = [
InputExample(texts=['This is a positive pair', 'Where the distance will be minimized'], label=1),
InputExample(texts=['This is a negative pair', 'Their distance will be increased'], label=0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=2)
train_loss = losses.ContrastiveLoss(model=model)
model.fit(
[(train_dataloader, train_loss)],
epochs=10,
)
"""
super(ContrastiveLoss, self).__init__()
self.distance_metric = distance_metric
self.margin = margin
self.model = model
self.size_average = size_average
def get_config_dict(self):
distance_metric_name = self.distance_metric.__name__
for name, value in vars(SiameseDistanceMetric).items():
if value == self.distance_metric:
distance_metric_name = "SiameseDistanceMetric.{}".format(name)
break
return {"distance_metric": distance_metric_name, "margin": self.margin, "size_average": self.size_average}
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.model(sentence_feature)["sentence_embedding"] for sentence_feature in sentence_features]
assert len(reps) == 2
rep_anchor, rep_other = reps
distances = self.distance_metric(rep_anchor, rep_other)
losses = 0.5 * (
labels.float() * distances.pow(2) + (1 - labels).float() * F.relu(self.margin - distances).pow(2)
)
return losses.mean() if self.size_average else losses.sum()
|
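The loss computed in ContrastiveLoss.forward above can be checked by hand with plain tensors; the snippet below is illustrative only and uses made-up embeddings and labels rather than a SentenceTransformer model:

import torch
import torch.nn.functional as F

margin = 0.5
rep_anchor = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
rep_other = torch.tensor([[0.9, 0.1], [1.0, 0.0]])
labels = torch.tensor([1.0, 0.0])  # 1 = similar pair, 0 = dissimilar pair

# SiameseDistanceMetric.COSINE_DISTANCE
distances = 1 - F.cosine_similarity(rep_anchor, rep_other)
losses = 0.5 * (
    labels * distances.pow(2) + (1 - labels) * F.relu(margin - distances).pow(2)
)
# The similar pair is penalized by its squared distance; the dissimilar pair only
# contributes if its distance falls below the margin (here it does not).
print(losses.mean())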
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
import xgboost.dask as dxgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch):
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
As boosting progresses, the probability should be adjusted downward"""
def __init__(self, *, validation_set, target_metric, maximize, seed):
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(self, model, epoch, evals_log):
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
            bool  # the deprecated np.bool alias was removed in NumPy 1.24; builtin bool works here
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client):
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = dxgb.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
"""
Example of using callbacks with Dask
====================================
"""
import numpy as np
from dask.distributed import Client, LocalCluster
from dask_ml.datasets import make_regression
from dask_ml.model_selection import train_test_split
import xgboost as xgb
from xgboost.dask import DaskDMatrix
def probability_for_going_backward(epoch):
return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))
# All callback functions must inherit from TrainingCallback
class CustomEarlyStopping(xgb.callback.TrainingCallback):
"""A custom early stopping class where early stopping is determined stochastically.
In the beginning, allow the metric to become worse with a probability of 0.999.
As boosting progresses, the probability should be adjusted downward"""
def __init__(self, *, validation_set, target_metric, maximize, seed):
self.validation_set = validation_set
self.target_metric = target_metric
self.maximize = maximize
self.seed = seed
self.rng = np.random.default_rng(seed=seed)
if maximize:
self.better = lambda x, y: x > y
else:
self.better = lambda x, y: x < y
def after_iteration(self, model, epoch, evals_log):
metric_history = evals_log[self.validation_set][self.target_metric]
if len(metric_history) < 2 or self.better(
metric_history[-1], metric_history[-2]
):
return False # continue training
p = probability_for_going_backward(epoch)
go_backward = self.rng.choice(2, size=(1,), replace=True, p=[1 - p, p]).astype(
            bool  # the deprecated np.bool alias was removed in NumPy 1.24; builtin bool works here
)[0]
print(
"The validation metric went into the wrong direction. "
+ f"Stopping training with probability {1 - p}..."
)
if go_backward:
return False # continue training
else:
return True # stop training
def main(client):
m = 100000
n = 100
X, y = make_regression(n_samples=m, n_features=n, chunks=200, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
dtrain = DaskDMatrix(client, X_train, y_train)
dtest = DaskDMatrix(client, X_test, y_test)
output = xgb.dask.train(
client,
{
"verbosity": 1,
"tree_method": "hist",
"objective": "reg:squarederror",
"eval_metric": "rmse",
"max_depth": 6,
"learning_rate": 1.0,
},
dtrain,
num_boost_round=1000,
evals=[(dtrain, "train"), (dtest, "test")],
callbacks=[
CustomEarlyStopping(
validation_set="test", target_metric="rmse", maximize=False, seed=0
)
],
)
if __name__ == "__main__":
# or use other clusters for scaling
with LocalCluster(n_workers=4, threads_per_worker=1) as cluster:
with Client(cluster) as client:
main(client)
|
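To see how tolerant the custom early stopping above is of a worsening metric, the decay of probability_for_going_backward can be printed on its own (numpy only; the commented values are approximate):

import numpy as np

def probability_for_going_backward(epoch):
    return 0.999 / (1.0 + 0.05 * np.log(1.0 + epoch))

for epoch in (0, 10, 100, 1000):
    print(epoch, round(float(probability_for_going_backward(epoch)), 3))
# 0 -> 0.999, 10 -> ~0.892, 100 -> ~0.812, 1000 -> ~0.743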
import torch
from torchvision import tv_tensors
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, tv_tensors.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
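Both versions of the subsampling kernel above reduce along dim -4, i.e. the temporal axis of a (T, C, H, W) video, keeping evenly spaced frames that include the first and last. A standalone check with plain torch (illustrative only, not the torchvision API):

import torch

video = torch.arange(16, dtype=torch.float32).view(16, 1, 1, 1).expand(16, 3, 2, 2)
num_samples = 4
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
print(indices.tolist())                              # [0, 5, 10, 15]
print(torch.index_select(video, -4, indices).shape)  # torch.Size([4, 3, 2, 2])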
"""
Tests the correct computation of evaluation scores from TripletEvaluator
"""
from __future__ import annotations
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
def test_TripletEvaluator(stsb_bert_tiny_model: SentenceTransformer) -> None:
"""Tests that the TripletEvaluator can be loaded & used"""
model = stsb_bert_tiny_model
anchors = [
"A person on a horse jumps over a broken down airplane.",
"Children smiling and waving at camera",
"A boy is jumping on skateboard in the middle of a red bridge.",
]
positives = [
"A person is outdoors, on a horse.",
"There are children looking at the camera.",
"The boy does a skateboarding trick.",
]
negatives = [
"A person is at a diner, ordering an omelette.",
"The kids are frowning",
"The boy skates down the sidewalk.",
]
evaluator = TripletEvaluator(anchors, positives, negatives, name="all_nli_dev")
metrics = evaluator(model)
assert evaluator.primary_metric == "all_nli_dev_cosine_accuracy"
assert metrics[evaluator.primary_metric] == 1.0
evaluator_with_margin = TripletEvaluator(anchors, positives, negatives, margin=0.7, name="all_nli_dev")
metrics = evaluator_with_margin(model)
assert metrics[evaluator.primary_metric] == 0.0
|
"""
Tests the correct computation of evaluation scores from TripletEvaluator
"""
from __future__ import annotations
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import TripletEvaluator
def test_TripletEvaluator(stsb_bert_tiny_model_reused: SentenceTransformer) -> None:
"""Tests that the TripletEvaluator can be loaded & used"""
model = stsb_bert_tiny_model_reused
anchors = [
"A person on a horse jumps over a broken down airplane.",
"Children smiling and waving at camera",
"A boy is jumping on skateboard in the middle of a red bridge.",
]
positives = [
"A person is outdoors, on a horse.",
"There are children looking at the camera.",
"The boy does a skateboarding trick.",
]
negatives = [
"A person is at a diner, ordering an omelette.",
"The kids are frowning",
"The boy skates down the sidewalk.",
]
evaluator = TripletEvaluator(anchors, positives, negatives, name="all_nli_dev")
metrics = evaluator(model)
assert evaluator.primary_metric == "all_nli_dev_cosine_accuracy"
assert metrics[evaluator.primary_metric] == 1.0
evaluator_with_margin = TripletEvaluator(anchors, positives, negatives, margin=0.7, name="all_nli_dev")
metrics = evaluator_with_margin(model)
assert metrics[evaluator.primary_metric] == 0.0
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, cache=None, on_host=False)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
"""Tests related to the `DataIter` interface."""
import numpy as np
import xgboost
from xgboost import testing as tm
def run_mixed_sparsity(device: str) -> None:
"""Check QDM with mixed batches."""
X_0, y_0, _ = tm.make_regression(128, 16, False)
if device.startswith("cuda"):
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
else:
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, False)
X_2, y_2 = tm.make_sparse_regression(512, 16, 0.9, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
if device.startswith("cuda"):
import cupy as cp # pylint: disable=import-error
X = [cp.array(batch) for batch in X]
it = tm.IteratorForTest(X, y, None, None, on_host=False)
Xy_0 = xgboost.QuantileDMatrix(it)
X_1, y_1 = tm.make_sparse_regression(256, 16, 0.1, True)
X = [X_0, X_1, X_2]
y = [y_0, y_1, y_2]
X_arr = np.concatenate(X, axis=0)
y_arr = np.concatenate(y, axis=0)
Xy_1 = xgboost.QuantileDMatrix(X_arr, y_arr)
assert tm.predictor_equal(Xy_0, Xy_1)
|
import numpy as np
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.document.io.json import orjson_dumps
from docarray.typing import Embedding
def test_proto_embedding():
embedding = parse_obj_as(Embedding, np.zeros((3, 224, 224)))
embedding._to_node_protobuf()
def test_json_schema():
schema_json_of(Embedding)
def test_dump_json():
tensor = parse_obj_as(Embedding, np.zeros((3, 224, 224)))
orjson_dumps(tensor)
|
import numpy as np
from pydantic.tools import parse_obj_as
from docarray.typing import Embedding
def test_proto_embedding():
uri = parse_obj_as(Embedding, np.zeros((3, 224, 224)))
uri._to_node_protobuf()
|
import pathlib
from argparse import ArgumentParser
def main(args):
wheel_path = pathlib.Path(args.wheel_path).expanduser().resolve()
if not wheel_path.exists():
raise ValueError(f"Wheel cannot be found at path {wheel_path}")
if not wheel_path.is_file():
raise ValueError(f"Path {wheel_path} is not a valid file")
wheel_dir, wheel_name = wheel_path.parent, wheel_path.name
tokens = wheel_name.split("-")
assert len(tokens) == 5
version = tokens[1].split("+")[0]
keywords = {
"pkg_name": tokens[0],
"version": version,
"commit_id": args.commit_hash,
"platform_tag": args.platform_tag,
}
new_wheel_name = (
"{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
**keywords
)
)
new_wheel_path = wheel_dir / new_wheel_name
print(f"Renaming {wheel_name} to {new_wheel_name}...")
if new_wheel_path.is_file():
new_wheel_path.unlink()
wheel_path.rename(new_wheel_path)
filesize = new_wheel_path.stat().st_size / 1024 / 1024 # MiB
print(f"Wheel size: {filesize:.2f} MiB")
if filesize > 300:
raise RuntimeError(
f"Limit of wheel size set by PyPI is exceeded. {new_wheel_name}: {filesize:.2f} MiB"
)
if __name__ == "__main__":
parser = ArgumentParser(
description="Format a Python wheel's name using the git commit hash and platform tag"
)
parser.add_argument(
"--wheel-path", type=str, required=True, help="Path to the wheel"
)
parser.add_argument(
"--commit-hash", type=str, required=True, help="Git commit hash"
)
parser.add_argument(
"--platform-tag",
type=str,
required=True,
help="Platform tag (e.g. manylinux2014_x86_64)",
)
parsed_args = parser.parse_args()
main(parsed_args)
|
import os
import sys
from test_utils import DirectoryExcursion
if len(sys.argv) != 4:
print("Usage: {} [wheel to rename] [commit id] [platform tag]".format(sys.argv[0]))
sys.exit(1)
whl_path = sys.argv[1]
commit_id = sys.argv[2]
platform_tag = sys.argv[3]
dirname, basename = os.path.dirname(whl_path), os.path.basename(whl_path)
with DirectoryExcursion(dirname):
tokens = basename.split("-")
assert len(tokens) == 5
version = tokens[1].split("+")[0]
keywords = {
"pkg_name": tokens[0],
"version": version,
"commit_id": commit_id,
"platform_tag": platform_tag,
}
new_name = "{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
**keywords
)
print("Renaming {} to {}...".format(basename, new_name))
if os.path.isfile(new_name):
os.remove(new_name)
os.rename(basename, new_name)
    filesize = os.path.getsize(new_name) / 1024 / 1024  # MiB
    print(f"Wheel size: {filesize:.2f} MiB")
msg = f"Limit of wheel size set by PyPI is exceeded. {new_name}: {filesize}"
assert filesize <= 300, msg
|
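Both renaming scripts above apply the same naming convention; with a made-up wheel name, commit hash, and platform tag the transformation looks like this:

tokens = "xgboost-2.0.0-py3-none-any.whl".split("-")
version = tokens[1].split("+")[0]
new_name = "{pkg_name}-{version}+{commit_id}-py3-none-{platform_tag}.whl".format(
    pkg_name=tokens[0],
    version=version,
    commit_id="0123abc",  # hypothetical commit hash
    platform_tag="manylinux2014_x86_64",
)
print(new_name)  # xgboost-2.0.0+0123abc-py3-none-manylinux2014_x86_64.whl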
from typing import Type
from .document import BaseDocument
class AnyDocument(BaseDocument):
"""
AnyDocument is a Document that is not tied to any schema
"""
def __init__(self, **kwargs):
super().__init__()
self.__dict__.update(kwargs)
@classmethod
def _get_field_type(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested Python class defined in the schema.
        Could be useful for reconstructing a Document during
        serialization/deserialization.
:param field: name of the field
:return:
"""
return AnyDocument
|
from typing import Type
from .document import BaseDocument
class AnyDocument(BaseDocument):
"""
AnyDocument is a Document that is not tied to any schema
"""
def __init__(self, **kwargs):
super().__init__()
self.__dict__.update(kwargs)
@classmethod
def _get_nested_document_class(cls, field: str) -> Type['BaseDocument']:
"""
        Access the nested Python class defined in the schema.
        Could be useful for reconstructing a Document during
        serialization/deserialization.
:param field: name of the field
:return:
"""
return AnyDocument
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CrowdHumanDataset
class TestCrowdHumanDataset(unittest.TestCase):
def test_crowdhuman_init(self):
dataset = CrowdHumanDataset(
data_root='tests/data/crowdhuman_dataset/',
ann_file='test_annotation_train.odgt',
data_prefix=dict(img='Images/'),
pipeline=[])
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['classes'], ('person', ))
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
from mmdet.datasets import CrowdHumanDataset
class TestCrowdHumanDataset(unittest.TestCase):
def test_crowdhuman_init(self):
dataset = CrowdHumanDataset(
data_root='tests/data/crowdhuman_dataset/',
ann_file='test_annotation_train.odgt',
data_prefix=dict(img='Images/'),
pipeline=[])
self.assertEqual(len(dataset), 1)
self.assertEqual(dataset.metainfo['CLASSES'], ('person', ))
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.cleanlab import CleanlabTLM
from llama_index.llms.cleanlab.base import DEFAULT_MODEL, DEFAULT_MAX_TOKENS
def test_llms_cleanlab():
names_of_base_classes = [b.__name__ for b in CleanlabTLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
def test_init_defaults():
llm = CleanlabTLM(api_key="x")
assert llm.model == DEFAULT_MODEL
assert llm.max_tokens == DEFAULT_MAX_TOKENS
def test_init_with_option_overrides():
override_model = "gpt-4.1"
override_max_tokens = 1024
options = {"model": override_model, "max_tokens": override_max_tokens}
llm = CleanlabTLM(api_key="x", options=options)
assert llm.model == override_model
assert llm.max_tokens == override_max_tokens
|
from llama_index.core.base.llms.base import BaseLLM
from llama_index.llms.cleanlab import CleanlabTLM
def test_llms_cleanlab():
names_of_base_classes = [b.__name__ for b in CleanlabTLM.__mro__]
assert BaseLLM.__name__ in names_of_base_classes
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_binary_metric import (
AnswerConsistencyBinaryMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyBinaryEvaluator(BaseEvaluator):
"""
Tonic Validate's answer consistency binary metric.
The output score is a float that is either 0.0 or 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyBinaryMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|
from typing import Any, Optional, Sequence
from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult
from llama_index.core.prompts.mixin import PromptDictType, PromptMixinType
from tonic_validate.metrics.answer_consistency_binary_metric import (
AnswerConsistencyBinaryMetric,
)
from tonic_validate.services.openai_service import OpenAIService
class AnswerConsistencyBinaryEvaluator(BaseEvaluator):
"""Tonic Validate's answer consistency binary metric.
The output score is a float that is either 0.0 or 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AnswerConsistencyBinaryMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
|