input | output |
---|---|
# Copyright (c) OpenMMLab. All rights reserved.
# This script consists of several convert functions which
# can modify the weights of model in original repo to be
# pre-trained weights.
from collections import OrderedDict
import torch
def pvt_convert(ckpt):
new_ckpt = OrderedDict()
# Process the concat between q linear weights and kv linear weights
use_abs_pos_embed = False
use_conv_ffn = False
for k in ckpt.keys():
if k.startswith('pos_embed'):
use_abs_pos_embed = True
if k.find('dwconv') >= 0:
use_conv_ffn = True
for k, v in ckpt.items():
if k.startswith('head'):
continue
if k.startswith('norm.'):
continue
if k.startswith('cls_token'):
continue
if k.startswith('pos_embed'):
stage_i = int(k.replace('pos_embed', ''))
new_k = k.replace(f'pos_embed{stage_i}',
f'layers.{stage_i - 1}.1.0.pos_embed')
if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7
new_v = v[:, 1:, :] # remove cls token
else:
new_v = v
elif k.startswith('patch_embed'):
stage_i = int(k.split('.')[0].replace('patch_embed', ''))
new_k = k.replace(f'patch_embed{stage_i}',
f'layers.{stage_i - 1}.0')
new_v = v
if 'proj.' in new_k:
new_k = new_k.replace('proj.', 'projection.')
elif k.startswith('block'):
stage_i = int(k.split('.')[0].replace('block', ''))
layer_i = int(k.split('.')[1])
new_layer_i = layer_i + use_abs_pos_embed
new_k = k.replace(f'block{stage_i}.{layer_i}',
f'layers.{stage_i - 1}.1.{new_layer_i}')
new_v = v
if 'attn.q.' in new_k:
sub_item_k = k.replace('q.', 'kv.')
new_k = new_k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
elif 'attn.kv.' in new_k:
continue
elif 'attn.proj.' in new_k:
new_k = new_k.replace('proj.', 'attn.out_proj.')
elif 'attn.sr.' in new_k:
# spatial-reduction (sr) keys are kept as-is (this replace is a no-op)
new_k = new_k.replace('sr.', 'sr.')
elif 'mlp.' in new_k:
new_k = new_k.replace('mlp.', 'ffn.layers.')
if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
new_v = v.reshape((*v.shape, 1, 1))
new_k = new_k.replace('fc1.', '0.')
new_k = new_k.replace('dwconv.dwconv.', '1.')
if use_conv_ffn:
new_k = new_k.replace('fc2.', '4.')
else:
new_k = new_k.replace('fc2.', '3.')
elif k.startswith('norm'):
stage_i = int(k[4])
new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')
new_v = v
else:
new_k = k
new_v = v
new_ckpt[new_k] = new_v
return new_ckpt
def swin_converter(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt['backbone.' + new_k] = new_v
return new_ckpt
|
# Copyright (c) OpenMMLab. All rights reserved.
# This script consists of several convert functions which
# can modify the weights of model in original repo to be
# pre-trained weights.
from collections import OrderedDict
def swin_converter(ckpt):
new_ckpt = OrderedDict()
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt['backbone.' + new_k] = new_v
return new_ckpt
|
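A minimal usage sketch for the converter above, assuming a checkpoint file downloaded from the original Swin repository (the filename and the 'model' nesting key are assumptions about the upstream format, not part of this script):
import torch
ckpt = torch.load('swin_tiny_patch4_window7_224.pth', map_location='cpu')  # assumed local file
state_dict = ckpt.get('model', ckpt)  # official Swin checkpoints nest weights under 'model'
torch.save({'state_dict': swin_converter(state_dict)}, 'swin_tiny_mmdet.pth')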
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
from PIL import Image
from sentence_transformers import SentenceTransformer, models, util
###########
image = Image.open("two_dogs_in_snow.jpg")
from transformers import CLIPModel, CLIPProcessor
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a cat", "a dog"], images=[image], return_tensors="pt", padding=True)
output = model(**inputs)
# vision_outputs = model.vision_model(pixel_values=inputs['pixel_values'])
# image_embeds = model.visual_projection(vision_outputs[1])
# print(image_embeds.shape)
# exit()
# Load CLIP model
clip = models.CLIPModel()
model = SentenceTransformer(modules=[clip])
model.save("tmp-clip-model")
model = SentenceTransformer("tmp-clip-model")
# Encode an image:
img_emb = model.encode(Image.open("two_dogs_in_snow.jpg"))
# Encode text descriptions
text_emb = model.encode(["Two dogs in the snow", "A cat on a table", "A picture of London at night"])
# Compute cosine similarities
cos_scores = util.cos_sim(img_emb, text_emb)
print(cos_scores)
|
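As a follow-on to the snippet above, the similarity matrix lends itself to zero-shot caption ranking; a small sketch reusing `cos_scores` and the same three captions:
captions = ["Two dogs in the snow", "A cat on a table", "A picture of London at night"]
best = cos_scores.argmax().item()  # cos_scores has shape (1, 3): one image vs. three texts
print(f"Best caption: {captions[best]} ({cos_scores[0][best].item():.3f})")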
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.models import build_detector
from mmdet.structures import DetDataSample
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test loss mode
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
out = detector.forward(batch_inputs, data_samples, mode='tensor')
self.assertIsInstance(out, tuple)
|
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from parameterized import parameterized
from mmdet.data_elements import DetDataSample
from mmdet.models import build_detector
from mmdet.testing._utils import demo_mm_inputs, get_detector_cfg
from mmdet.utils import register_all_modules
class TestTwoStagePanopticSegmentor(unittest.TestCase):
def setUp(self):
register_all_modules()
def _create_model_cfg(self):
cfg_file = 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py'
model_cfg = get_detector_cfg(cfg_file)
model_cfg.backbone.depth = 18
model_cfg.neck.in_channels = [64, 128, 256, 512]
model_cfg.backbone.init_cfg = None
return model_cfg
def test_init(self):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
assert detector.backbone
assert detector.neck
assert detector.rpn_head
assert detector.roi_head
assert detector.roi_head.mask_head
assert detector.with_semantic_head
assert detector.with_panoptic_fusion_head
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_loss_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, True)
# Test loss mode
losses = detector.forward(batch_inputs, data_samples, mode='loss')
self.assertIsInstance(losses, dict)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_predict_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2,
image_shapes=[(3, 128, 127), (3, 91, 92)],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
# Test forward test
detector.eval()
with torch.no_grad():
batch_results = detector.forward(
batch_inputs, data_samples, mode='predict')
self.assertEqual(len(batch_results), 2)
self.assertIsInstance(batch_results[0], DetDataSample)
@parameterized.expand([('cpu', ), ('cuda', )])
def test_forward_tensor_mode(self, device):
model_cfg = self._create_model_cfg()
detector = build_detector(model_cfg)
if device == 'cuda' and not torch.cuda.is_available():
return unittest.skip('test requires GPU and torch+cuda')
detector = detector.to(device)
packed_inputs = demo_mm_inputs(
2, [[3, 128, 128], [3, 125, 130]],
sem_seg_output_strides=1,
with_mask=True,
with_semantic=True)
batch_inputs, data_samples = detector.data_preprocessor(
packed_inputs, False)
out = detector.forward(batch_inputs, data_samples, mode='tensor')
self.assertIsInstance(out, tuple)
|
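One detail worth flagging in the tests above: `return unittest.skip('...')` returns a decorator object rather than skipping, so the 'cuda' case is silently reported as passed on CPU-only machines; `self.skipTest(...)` is the in-body idiom. A minimal standalone sketch of the same device parameterization:
import unittest
import torch
from parameterized import parameterized
class TestDevicePattern(unittest.TestCase):
    @parameterized.expand([('cpu', ), ('cuda', )])
    def test_tensor_add(self, device):
        if device == 'cuda' and not torch.cuda.is_available():
            self.skipTest('test requires GPU and torch+cuda')
        x = torch.ones(2, device=device)
        self.assertEqual((x + x).sum().item(), 4.0)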
"""Test Ollama embeddings."""
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_ollama.embeddings import OllamaEmbeddings
MODEL_NAME = "llama3.1"
class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> type[OllamaEmbeddings]:
return OllamaEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model": MODEL_NAME}
|
"""Test Ollama embeddings."""
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
from langchain_ollama.embeddings import OllamaEmbeddings
class TestOllamaEmbeddings(EmbeddingsIntegrationTests):
@property
def embeddings_class(self) -> type[OllamaEmbeddings]:
return OllamaEmbeddings
@property
def embedding_model_params(self) -> dict:
return {"model": "llama3:latest"}
|
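For reference, the class under test can be exercised directly; this sketch assumes a local Ollama server with the model already pulled:
from langchain_ollama.embeddings import OllamaEmbeddings
emb = OllamaEmbeddings(model="llama3.1")
vector = emb.embed_query("hello world")
print(len(vector))  # embedding dimensionality reported by the model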
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteHubspotReader(AirbyteCDKReader):
"""
AirbyteHubspotReader reader.
Retrieve documents from Hubspot
Args:
config: The config object for the hubspot source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_hubspot
super().__init__(
source_class=source_hubspot.SourceHubspot,
config=config,
record_handler=record_handler,
)
|
from typing import Any, Mapping, Optional
from llama_index.readers.airbyte_cdk.base import AirbyteCDKReader, RecordHandler
class AirbyteHubspotReader(AirbyteCDKReader):
"""AirbyteHubspotReader reader.
Retrieve documents from Hubspot
Args:
config: The config object for the hubspot source.
"""
def __init__(
self,
config: Mapping[str, Any],
record_handler: Optional[RecordHandler] = None,
) -> None:
"""Initialize with parameters."""
import source_hubspot
super().__init__(
source_class=source_hubspot.SourceHubspot,
config=config,
record_handler=record_handler,
)
|
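A hedged usage sketch for the reader above; the config keys shown are illustrative placeholders, and the authoritative schema is defined by the Airbyte HubSpot source:
reader = AirbyteHubspotReader(
    config={
        "start_date": "2024-01-01T00:00:00Z",  # illustrative key
        "credentials": {"access_token": "<your-private-app-token>"},  # illustrative key
    }
)
documents = reader.load_data(stream_name="contacts")  # stream names come from the source catalog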
# Copyright (c) OpenMMLab. All rights reserved.
import platform
import time
import pytest
import mmengine
@pytest.mark.skipif(
platform.system() != 'Linux', reason='Only test `Timer` in linux!')
def test_timer_init():
timer = mmengine.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmengine.Timer()
assert timer.is_running
@pytest.mark.skipif(
platform.system() != 'Linux', reason='Only test `Timer` in linux!')
def test_timer_run():
timer = mmengine.Timer()
time.sleep(1)
# On Windows, the error could be larger than 20 ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(timer.since_start() - 1) < 3e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 3e-2
assert abs(timer.since_start() - 2) < 3e-2
timer = mmengine.Timer(False)
with pytest.raises(mmengine.TimerError):
timer.since_start()
with pytest.raises(mmengine.TimerError):
timer.since_last_check()
@pytest.mark.skipif(
platform.system() != 'Linux', reason='Only test `Timer` in linux!')
def test_timer_context(capsys):
with mmengine.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
# On Windows, the error could be larger than 20 ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(float(out) - 1) < 3e-2
with mmengine.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
# Copyright (c) OpenMMLab. All rights reserved.
import time
import pytest
import mmengine
def test_timer_init():
timer = mmengine.Timer(start=False)
assert not timer.is_running
timer.start()
assert timer.is_running
timer = mmengine.Timer()
assert timer.is_running
def test_timer_run():
timer = mmengine.Timer()
time.sleep(1)
# On Windows, the error could be larger than 20 ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(timer.since_start() - 1) < 3e-2
time.sleep(1)
assert abs(timer.since_last_check() - 1) < 3e-2
assert abs(timer.since_start() - 2) < 3e-2
timer = mmengine.Timer(False)
with pytest.raises(mmengine.TimerError):
timer.since_start()
with pytest.raises(mmengine.TimerError):
timer.since_last_check()
def test_timer_context(capsys):
with mmengine.Timer():
time.sleep(1)
out, _ = capsys.readouterr()
# On Windows, the error could be larger than 20 ms. More details in
# https://stackoverflow.com/questions/11657734/sleep-for-exact-time-in-python. # noqa: E501
assert abs(float(out) - 1) < 3e-2
with mmengine.Timer(print_tmpl='time: {:.1f}s'):
time.sleep(1)
out, _ = capsys.readouterr()
assert out == 'time: 1.0s\n'
|
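The behaviour asserted by the tests above maps to straightforward direct use of `Timer`:
import time
import mmengine
timer = mmengine.Timer()           # starts running immediately
time.sleep(0.5)
print(timer.since_start())         # ~0.5
print(timer.since_last_check())    # measures from, and resets, the last checkpoint
with mmengine.Timer(print_tmpl='elapsed: {:.1f}s'):
    time.sleep(0.2)                # prints 'elapsed: 0.2s' on exit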
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
AstraDBChatMessageHistory,
CassandraChatMessageHistory,
ChatMessageHistory,
CosmosDBChatMessageHistory,
DynamoDBChatMessageHistory,
ElasticsearchChatMessageHistory,
FileChatMessageHistory,
FirestoreChatMessageHistory,
MomentoChatMessageHistory,
MongoDBChatMessageHistory,
Neo4jChatMessageHistory,
PostgresChatMessageHistory,
RedisChatMessageHistory,
RocksetChatMessageHistory,
SingleStoreDBChatMessageHistory,
SQLChatMessageHistory,
StreamlitChatMessageHistory,
UpstashRedisChatMessageHistory,
XataChatMessageHistory,
ZepChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AstraDBChatMessageHistory": "langchain_community.chat_message_histories",
"CassandraChatMessageHistory": "langchain_community.chat_message_histories",
"ChatMessageHistory": "langchain_community.chat_message_histories",
"CosmosDBChatMessageHistory": "langchain_community.chat_message_histories",
"DynamoDBChatMessageHistory": "langchain_community.chat_message_histories",
"ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories",
"FileChatMessageHistory": "langchain_community.chat_message_histories",
"FirestoreChatMessageHistory": "langchain_community.chat_message_histories",
"MomentoChatMessageHistory": "langchain_community.chat_message_histories",
"MongoDBChatMessageHistory": "langchain_community.chat_message_histories",
"Neo4jChatMessageHistory": "langchain_community.chat_message_histories",
"PostgresChatMessageHistory": "langchain_community.chat_message_histories",
"RedisChatMessageHistory": "langchain_community.chat_message_histories",
"RocksetChatMessageHistory": "langchain_community.chat_message_histories",
"SQLChatMessageHistory": "langchain_community.chat_message_histories",
"SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories",
"StreamlitChatMessageHistory": "langchain_community.chat_message_histories",
"UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories",
"XataChatMessageHistory": "langchain_community.chat_message_histories",
"ZepChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDBChatMessageHistory",
"CassandraChatMessageHistory",
"ChatMessageHistory",
"CosmosDBChatMessageHistory",
"DynamoDBChatMessageHistory",
"ElasticsearchChatMessageHistory",
"FileChatMessageHistory",
"FirestoreChatMessageHistory",
"MomentoChatMessageHistory",
"MongoDBChatMessageHistory",
"Neo4jChatMessageHistory",
"PostgresChatMessageHistory",
"RedisChatMessageHistory",
"RocksetChatMessageHistory",
"SQLChatMessageHistory",
"SingleStoreDBChatMessageHistory",
"StreamlitChatMessageHistory",
"UpstashRedisChatMessageHistory",
"XataChatMessageHistory",
"ZepChatMessageHistory",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.chat_message_histories import (
AstraDBChatMessageHistory,
CassandraChatMessageHistory,
ChatMessageHistory,
CosmosDBChatMessageHistory,
DynamoDBChatMessageHistory,
ElasticsearchChatMessageHistory,
FileChatMessageHistory,
FirestoreChatMessageHistory,
MomentoChatMessageHistory,
MongoDBChatMessageHistory,
Neo4jChatMessageHistory,
PostgresChatMessageHistory,
RedisChatMessageHistory,
RocksetChatMessageHistory,
SingleStoreDBChatMessageHistory,
SQLChatMessageHistory,
StreamlitChatMessageHistory,
UpstashRedisChatMessageHistory,
XataChatMessageHistory,
ZepChatMessageHistory,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AstraDBChatMessageHistory": "langchain_community.chat_message_histories",
"CassandraChatMessageHistory": "langchain_community.chat_message_histories",
"ChatMessageHistory": "langchain_community.chat_message_histories",
"CosmosDBChatMessageHistory": "langchain_community.chat_message_histories",
"DynamoDBChatMessageHistory": "langchain_community.chat_message_histories",
"ElasticsearchChatMessageHistory": "langchain_community.chat_message_histories",
"FileChatMessageHistory": "langchain_community.chat_message_histories",
"FirestoreChatMessageHistory": "langchain_community.chat_message_histories",
"MomentoChatMessageHistory": "langchain_community.chat_message_histories",
"MongoDBChatMessageHistory": "langchain_community.chat_message_histories",
"Neo4jChatMessageHistory": "langchain_community.chat_message_histories",
"PostgresChatMessageHistory": "langchain_community.chat_message_histories",
"RedisChatMessageHistory": "langchain_community.chat_message_histories",
"RocksetChatMessageHistory": "langchain_community.chat_message_histories",
"SQLChatMessageHistory": "langchain_community.chat_message_histories",
"SingleStoreDBChatMessageHistory": "langchain_community.chat_message_histories",
"StreamlitChatMessageHistory": "langchain_community.chat_message_histories",
"UpstashRedisChatMessageHistory": "langchain_community.chat_message_histories",
"XataChatMessageHistory": "langchain_community.chat_message_histories",
"ZepChatMessageHistory": "langchain_community.chat_message_histories",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AstraDBChatMessageHistory",
"CassandraChatMessageHistory",
"ChatMessageHistory",
"CosmosDBChatMessageHistory",
"DynamoDBChatMessageHistory",
"ElasticsearchChatMessageHistory",
"FileChatMessageHistory",
"FirestoreChatMessageHistory",
"MomentoChatMessageHistory",
"MongoDBChatMessageHistory",
"Neo4jChatMessageHistory",
"PostgresChatMessageHistory",
"RedisChatMessageHistory",
"RocksetChatMessageHistory",
"SingleStoreDBChatMessageHistory",
"SQLChatMessageHistory",
"StreamlitChatMessageHistory",
"UpstashRedisChatMessageHistory",
"XataChatMessageHistory",
"ZepChatMessageHistory",
]
|
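The modules above rely on PEP 562 module-level `__getattr__`; a minimal self-contained sketch of the same lazy-redirect idea, independent of LangChain's `create_importer` (the lookup table here is a one-entry stand-in):
import importlib
from typing import Any
_LOOKUP = {"ChatMessageHistory": "langchain_community.chat_message_histories"}
def __getattr__(name: str) -> Any:
    # Resolve deprecated names against their new home on first access.
    if name in _LOOKUP:
        return getattr(importlib.import_module(_LOOKUP[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")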
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
classes=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
import os
import os.path as osp
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from mmdet.evaluation import CityScapesMetric
try:
import cityscapesscripts
except ImportError:
cityscapesscripts = None
class TestCityScapesMetric(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tmp_dir.cleanup()
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_init(self):
# test with outfile_prefix = None
with self.assertRaises(AssertionError):
CityScapesMetric(outfile_prefix=None)
# test with format_only=True, keep_results=False
with self.assertRaises(AssertionError):
CityScapesMetric(
outfile_prefix=self.tmp_dir.name + 'test',
format_only=True,
keep_results=False)
@unittest.skipIf(cityscapesscripts is None,
'cityscapesscripts is not installed.')
def test_evaluate(self):
dummy_mask1 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask1[:, :10, :10] = 1
dummy_mask2 = np.zeros((1, 20, 20), dtype=np.uint8)
dummy_mask2[:, :10, :10] = 1
self.outfile_prefix = osp.join(self.tmp_dir.name, 'test')
self.seg_prefix = osp.join(self.tmp_dir.name, 'cityscapes/gtFine/val')
city = 'lindau'
sequenceNb = '000000'
frameNb = '000019'
img_name1 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path1 = osp.join(self.seg_prefix, city, img_name1)
frameNb = '000020'
img_name2 = f'{city}_{sequenceNb}_{frameNb}_gtFine_instanceIds.png'
img_path2 = osp.join(self.seg_prefix, city, img_name2)
os.makedirs(osp.join(self.seg_prefix, city))
masks1 = np.zeros((20, 20), dtype=np.int32)
masks1[:10, :10] = 24 * 1000
Image.fromarray(masks1).save(img_path1)
masks2 = np.zeros((20, 20), dtype=np.int32)
masks2[:10, :10] = 24 * 1000 + 1
Image.fromarray(masks2).save(img_path2)
data_samples = [{
'img_path': img_path1,
'pred_instances': {
'scores': torch.from_numpy(np.array([1.0])),
'labels': torch.from_numpy(np.array([0])),
'masks': torch.from_numpy(dummy_mask1)
}
}, {
'img_path': img_path2,
'pred_instances': {
'scores': torch.from_numpy(np.array([0.98])),
'labels': torch.from_numpy(np.array([1])),
'masks': torch.from_numpy(dummy_mask2)
}
}]
target = {'cityscapes/mAP': 0.5, 'cityscapes/AP@50': 0.5}
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=False,
keep_results=False,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, target)
del metric
self.assertTrue(not osp.exists(f'{self.outfile_prefix}.results'))
# test format_only
metric = CityScapesMetric(
seg_prefix=self.seg_prefix,
format_only=True,
keep_results=True,
outfile_prefix=self.outfile_prefix)
metric.dataset_meta = dict(
CLASSES=('person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle'))
metric.process({}, data_samples)
results = metric.evaluate(size=2)
self.assertDictEqual(results, dict())
|
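The `24 * 1000` ground-truth values in the tests above follow the Cityscapes `instanceIds` convention: instance-level pixels are encoded as `class_id * 1000 + instance_index`, and class id 24 is 'person'. A tiny decoder makes the encoding explicit:
def decode_instance_id(instance_id: int) -> tuple:
    # Cityscapes packs the semantic class and the instance index into one int.
    return instance_id // 1000, instance_id % 1000
print(decode_instance_id(24000))  # (24, 0): first 'person' instance
print(decode_instance_id(24001))  # (24, 1): second 'person' instance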
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from jina import Client, Document, Executor, Flow, requests
from jina.helper import random_port
class MyExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'I am coming from MyExecutor'
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def client_post(doc, port, client):
result = client.post(on='/', inputs=doc)[0]
print(f'doc.id {doc.id} vs result.id {result.id}')
return result
def test_multithread_client(capsys):
port = random_port()
stop_event = threading.Event()
flow = Flow(port=port).add(uses=MyExecutor)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
c = Client(port=port)
try:
with capsys.disabled():
t.start()
time.sleep(5)
with ThreadPoolExecutor(max_workers=50) as pool:
tasks = []
for i in range(1000):
doc = Document(id=f'{i}', text='hello world')
task = pool.submit(client_post, doc, port, c)
tasks.append(task)
for i, task in enumerate(tasks):
result = task.result()
assert result.id == f'{i}'
assert result.text == 'I am coming from MyExecutor'
with capsys.disabled():
stdout, stderr = capsys.readouterr()
assert 'BlockingIOError' not in stderr
finally:
stop_event.set()
t.join()
|
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from jina import Client, Document, Executor, Flow, requests
from jina.helper import random_port
class MyExecutor(Executor):
@requests
def foo(self, docs, **kwargs):
for doc in docs:
doc.text = 'I am coming from MyExecutor'
def flow_run(flow, stop_event):
with flow:
flow.block(stop_event)
def client_post(doc, client):
result = client.post(on='/', inputs=doc)[0]
return result
def test_multithread_client(capsys):
port = random_port()
stop_event = threading.Event()
flow = Flow(port=port).add(uses=MyExecutor)
t = threading.Thread(target=flow_run, args=(flow, stop_event))
c = Client(port=port)
try:
with capsys.disabled():
t.start()
time.sleep(5)
with ThreadPoolExecutor(max_workers=50) as pool:
tasks = []
for i in range(1000):
doc = Document(id=f'{i}', text='hello world')
task = pool.submit(client_post, doc, c)
tasks.append(task)
for i, task in enumerate(tasks):
result = task.result()
assert result.id == f'{i}'
assert result.text == 'I am coming from MyExecutor'
with capsys.disabled():
stdout, stderr = capsys.readouterr()
assert 'BlockingIOError' not in stderr
finally:
stop_event.set()
t.join()
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to set up.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=[
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
],
)
await self._async_setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
|
import os
import grpc
import pytest
from jina import Flow, __default_host__
from jina.clients import Client
from jina.excepts import PortAlreadyUsed
from jina.helper import is_port_free
from jina.serve.runtimes.gateway.grpc import GRPCGatewayRuntime as _GRPCGatewayRuntime
from tests import random_docs
@pytest.fixture(scope='function')
def flow_with_grpc(monkeypatch):
class AuthInterceptor(grpc.aio.ServerInterceptor):
def __init__(self, key):
self._valid_metadata = ('rpc-auth-header', key)
def deny(_, context):
context.abort(grpc.StatusCode.UNAUTHENTICATED, 'Invalid key')
self._deny = grpc.unary_unary_rpc_method_handler(deny)
async def intercept_service(self, continuation, handler_call_details):
meta = handler_call_details.invocation_metadata
metas_dicts = {m.key: m.value for m in meta}
assert 'rpc-auth-header' in metas_dicts
assert (
metas_dicts['rpc-auth-header'] == 'access_key'
), f'Invalid access key detected, got {metas_dicts["rpc-auth-header"]}'
for m in meta:
if m == self._valid_metadata:
return await continuation(handler_call_details)
return self._deny
class AlternativeGRPCGatewayRuntime(_GRPCGatewayRuntime):
async def async_setup(self):
"""
The async method to set up.
Create the gRPC server and expose the port for communication.
"""
if not self.args.proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
if not (is_port_free(__default_host__, self.args.port)):
raise PortAlreadyUsed(f'port:{self.args.port}')
self.server = grpc.aio.server(
interceptors=(AuthInterceptor('access_key'),),
options=[
('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
],
)
self._set_topology_graph()
self._set_connection_pool()
await self._async_setup_server()
monkeypatch.setattr(
'jina.serve.runtimes.gateway.grpc.GRPCGatewayRuntime',
AlternativeGRPCGatewayRuntime,
)
return Flow(protocol='grpc').add()
def test_client_grpc_kwargs(flow_with_grpc):
with flow_with_grpc:
client = Client(
port=flow_with_grpc.port,
host='localhost',
protocol='grpc',
)
meta_data = (('rpc-auth-header', 'invalid_access_key'),)
try:
client.post('', random_docs(1), request_size=1, metadata=meta_data)
except Exception as exc:
assert 'Invalid access key detected, got invalid_access_key' in repr(exc)
|
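For completeness, the client-side call that the interceptor accepts mirrors the failing one in the test, just with the valid key (a sketch, to be run while the flow is up):
client = Client(port=flow_with_grpc.port, host='localhost', protocol='grpc')
client.post('', random_docs(1), request_size=1, metadata=(('rpc-auth-header', 'access_key'),))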
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=list,
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default_factory=dict,
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
# If items is a dictionary, iterate over its key/value pairs
for key, item in items.items():
yield "item", item
yield "key", key
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
from typing import Any
from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema
from backend.data.model import SchemaField
from backend.util.json import json
class StepThroughItemsBlock(Block):
class Input(BlockSchema):
items: list = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default=[],
)
items_object: dict = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default={},
)
items_str: str = SchemaField(
advanced=False,
description="The list or dictionary of items to iterate over",
placeholder="[1, 2, 3, 4, 5] or {'key1': 'value1', 'key2': 'value2'}",
default="",
)
class Output(BlockSchema):
item: Any = SchemaField(description="The current item in the iteration")
key: Any = SchemaField(
description="The key or index of the current item in the iteration",
)
def __init__(self):
super().__init__(
id="f66a3543-28d3-4ab5-8945-9b336371e2ce",
input_schema=StepThroughItemsBlock.Input,
output_schema=StepThroughItemsBlock.Output,
categories={BlockCategory.LOGIC},
description="Iterates over a list or dictionary and outputs each item.",
test_input={"items": [1, 2, 3, {"key1": "value1", "key2": "value2"}]},
test_output=[
("item", 1),
("key", 0),
("item", 2),
("key", 1),
("item", 3),
("key", 2),
("item", {"key1": "value1", "key2": "value2"}),
("key", 3),
],
test_mock={},
)
def run(self, input_data: Input, **kwargs) -> BlockOutput:
for data in [input_data.items, input_data.items_object, input_data.items_str]:
if not data:
continue
if isinstance(data, str):
items = json.loads(data)
else:
items = data
if isinstance(items, dict):
# If items is a dictionary, iterate over its key/value pairs
for key, item in items.items():
yield "item", item
yield "key", key
else:
# If items is a list, iterate over the list
for index, item in enumerate(items):
yield "item", item
yield "key", index
|
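A small sketch of the expected yield order, assuming the corrected dictionary branch above and that `Input` (a pydantic `BlockSchema`) accepts keyword construction:
block = StepThroughItemsBlock()
for name, value in block.run(StepThroughItemsBlock.Input(items=[10, 20])):
    print(name, value)
# item 10 / key 0 / item 20 / key 1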
from base64 import b64encode
from typing import Optional
from urllib.parse import urlencode
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = ProviderName.NOTION
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
async def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = await Requests().post(
self.token_url, json=request_body, headers=headers
)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
async def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
async def _refresh_tokens(
self, credentials: OAuth2Credentials
) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
|
from base64 import b64encode
from typing import Optional
from urllib.parse import urlencode
from backend.data.model import OAuth2Credentials
from backend.integrations.providers import ProviderName
from backend.util.request import Requests
from .base import BaseOAuthHandler
class NotionOAuthHandler(BaseOAuthHandler):
"""
Based on the documentation at https://developers.notion.com/docs/authorization
Notes:
- Notion uses non-expiring access tokens and therefore doesn't have a refresh flow
- Notion doesn't use scopes
"""
PROVIDER_NAME = ProviderName.NOTION
def __init__(self, client_id: str, client_secret: str, redirect_uri: str):
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
self.auth_base_url = "https://api.notion.com/v1/oauth/authorize"
self.token_url = "https://api.notion.com/v1/oauth/token"
def get_login_url(
self, scopes: list[str], state: str, code_challenge: Optional[str]
) -> str:
params = {
"client_id": self.client_id,
"redirect_uri": self.redirect_uri,
"response_type": "code",
"owner": "user",
"state": state,
}
return f"{self.auth_base_url}?{urlencode(params)}"
def exchange_code_for_tokens(
self, code: str, scopes: list[str], code_verifier: Optional[str]
) -> OAuth2Credentials:
request_body = {
"grant_type": "authorization_code",
"code": code,
"redirect_uri": self.redirect_uri,
}
auth_str = b64encode(f"{self.client_id}:{self.client_secret}".encode()).decode()
headers = {
"Authorization": f"Basic {auth_str}",
"Accept": "application/json",
}
response = Requests().post(self.token_url, json=request_body, headers=headers)
token_data = response.json()
# Email is only available for non-bot users
email = (
token_data["owner"]["person"]["email"]
if "person" in token_data["owner"]
and "email" in token_data["owner"]["person"]
else None
)
return OAuth2Credentials(
provider=self.PROVIDER_NAME,
title=token_data.get("workspace_name"),
username=email,
access_token=token_data["access_token"],
refresh_token=None,
access_token_expires_at=None, # Notion tokens don't expire
refresh_token_expires_at=None,
scopes=[],
metadata={
"owner": token_data["owner"],
"bot_id": token_data["bot_id"],
"workspace_id": token_data["workspace_id"],
"workspace_name": token_data.get("workspace_name"),
"workspace_icon": token_data.get("workspace_icon"),
},
)
def revoke_tokens(self, credentials: OAuth2Credentials) -> bool:
# Notion doesn't support token revocation
return False
def _refresh_tokens(self, credentials: OAuth2Credentials) -> OAuth2Credentials:
# Notion doesn't support token refresh
return credentials
def needs_refresh(self, credentials: OAuth2Credentials) -> bool:
# Notion access tokens don't expire
return False
|
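The `Authorization: Basic ...` header built above is standard OAuth 2.0 (RFC 6749) client authentication; in isolation, with placeholder credentials:
from base64 import b64encode
auth = b64encode(b"my_client_id:my_client_secret").decode()  # placeholder credentials
print(f"Authorization: Basic {auth}")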
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from torchaudio.io import CodecConfig
from . import soundfile_backend
from .backend import Backend
from .common import AudioMetaData
class SoundfileBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
return soundfile_backend.info(uri, format)
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
return soundfile_backend.load(uri, frame_offset, num_frames, normalize, channels_first, format)
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
compression: Optional[Union[CodecConfig, float, int]] = None,
) -> None:
if compression:
raise ValueError("soundfile backend does not support argument `compression`.")
soundfile_backend.save(
uri, src, sample_rate, channels_first, format=format, encoding=encoding, bits_per_sample=bits_per_sample
)
@staticmethod
def can_decode(uri, format) -> bool:
return True
@staticmethod
def can_encode(uri, format) -> bool:
return True
|
import os
from typing import BinaryIO, Optional, Tuple, Union
import torch
from . import soundfile_backend
from .backend import Backend
from .common import AudioMetaData
class SoundfileBackend(Backend):
@staticmethod
def info(uri: Union[BinaryIO, str, os.PathLike], format: Optional[str], buffer_size: int = 4096) -> AudioMetaData:
return soundfile_backend.info(uri, format)
@staticmethod
def load(
uri: Union[BinaryIO, str, os.PathLike],
frame_offset: int = 0,
num_frames: int = -1,
normalize: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
buffer_size: int = 4096,
) -> Tuple[torch.Tensor, int]:
return soundfile_backend.load(uri, frame_offset, num_frames, normalize, channels_first, format)
@staticmethod
def save(
uri: Union[BinaryIO, str, os.PathLike],
src: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
format: Optional[str] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
buffer_size: int = 4096,
) -> None:
soundfile_backend.save(
uri, src, sample_rate, channels_first, format=format, encoding=encoding, bits_per_sample=bits_per_sample
)
@staticmethod
def can_decode(uri, format) -> bool:
return True
@staticmethod
def can_encode(uri, format) -> bool:
return True
|
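Callers normally reach this backend through torchaudio's dispatching top-level API rather than instantiating it directly; a sketch assuming a local 'sample.wav' and the soundfile package installed:
import torchaudio
waveform, sample_rate = torchaudio.load("sample.wav", backend="soundfile")
print(waveform.shape, sample_rate)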
from pathlib import Path
from llama_index.core.bridge.pydantic import AnyUrl
from llama_index.core.schema import MediaResource
def test_defaults():
m = MediaResource()
assert m.data is None
assert m.embeddings is None
assert m.mimetype is None
assert m.path is None
assert m.url is None
def test_mimetype():
png_1px = b"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px, mimetype=None)
assert m.mimetype == "image/png"
def test_mimetype_raw_data():
import requests
resp = requests.get(
"https://storage.googleapis.com/generativeai-downloads/data/scene.jpg"
)
m = MediaResource(data=resp.content)
assert m.mimetype == "image/jpeg"
def test_mimetype_from_path():
m = MediaResource(path=Path("my-image.jpg"), mimetype=None)
assert m.mimetype == "image/jpeg"
def test_mimetype_prioritizes_data():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(
data=png_1px.encode("utf-8"), mimetype=None, path=Path("my_image.jpg")
)
assert m.mimetype == "image/png"
def test_hash():
assert (
MediaResource(
data=b"test bytes",
path=Path("foo/bar/baz"),
url=AnyUrl("http://example.com"),
text="some text",
).hash
== "04414a5f03ad7fa055229b4d3690d47427cb0b65bc7eb8f770d1ecbd54ab4909"
)
assert MediaResource().hash == ""
|
from pathlib import Path
from llama_index.core.bridge.pydantic import AnyUrl
from llama_index.core.schema import MediaResource
def test_defaults():
m = MediaResource()
assert m.data is None
assert m.embeddings is None
assert m.mimetype is None
assert m.path is None
assert m.url is None
def test_mimetype():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px.encode("utf-8"), mimetype=None)
assert m.mimetype == "image/png"
def test_mimetype_from_path():
m = MediaResource(path="my-image.jpg", mimetype=None)
assert m.mimetype == "image/jpeg"
def test_mimetype_prioritizes_data():
png_1px = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg=="
m = MediaResource(data=png_1px.encode("utf-8"), mimetype=None, path="my_image.jpg")
assert m.mimetype == "image/png"
def test_hash():
assert (
MediaResource(
data=b"test bytes",
path=Path("foo/bar/baz"),
url=AnyUrl("http://example.com"),
text="some text",
).hash
== "04414a5f03ad7fa055229b4d3690d47427cb0b65bc7eb8f770d1ecbd54ab4909"
)
assert MediaResource().hash == ""
|
"""
Comparison between grid search and successive halving
=====================================================
This example compares the parameter search performed by
:class:`~sklearn.model_selection.HalvingGridSearchCV` and
:class:`~sklearn.model_selection.GridSearchCV`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.experimental import enable_halving_search_cv # noqa: F401
from sklearn.model_selection import GridSearchCV, HalvingGridSearchCV
from sklearn.svm import SVC
# %%
# We first define the parameter space for an :class:`~sklearn.svm.SVC`
# estimator, and compute the time required to train a
# :class:`~sklearn.model_selection.HalvingGridSearchCV` instance, as well as a
# :class:`~sklearn.model_selection.GridSearchCV` instance.
rng = np.random.RandomState(0)
X, y = datasets.make_classification(n_samples=1000, random_state=rng)
gammas = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
Cs = [1, 10, 100, 1e3, 1e4, 1e5]
param_grid = {"gamma": gammas, "C": Cs}
clf = SVC(random_state=rng)
tic = time()
gsh = HalvingGridSearchCV(
estimator=clf, param_grid=param_grid, factor=2, random_state=rng
)
gsh.fit(X, y)
gsh_time = time() - tic
tic = time()
gs = GridSearchCV(estimator=clf, param_grid=param_grid)
gs.fit(X, y)
gs_time = time() - tic
# %%
# We now plot heatmaps for both search estimators.
def make_heatmap(ax, gs, is_sh=False, make_cbar=False):
"""Helper to make a heatmap."""
results = pd.DataFrame(gs.cv_results_)
results[["param_C", "param_gamma"]] = results[["param_C", "param_gamma"]].astype(
np.float64
)
if is_sh:
# SH dataframe: get mean_test_score values for the highest iter
scores_matrix = results.sort_values("iter").pivot_table(
index="param_gamma",
columns="param_C",
values="mean_test_score",
aggfunc="last",
)
else:
scores_matrix = results.pivot(
index="param_gamma", columns="param_C", values="mean_test_score"
)
im = ax.imshow(scores_matrix)
ax.set_xticks(np.arange(len(Cs)))
ax.set_xticklabels(["{:.0E}".format(x) for x in Cs])
ax.set_xlabel("C", fontsize=15)
ax.set_yticks(np.arange(len(gammas)))
ax.set_yticklabels(["{:.0E}".format(x) for x in gammas])
ax.set_ylabel("gamma", fontsize=15)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
if is_sh:
iterations = results.pivot_table(
index="param_gamma", columns="param_C", values="iter", aggfunc="max"
).values
for i in range(len(gammas)):
for j in range(len(Cs)):
ax.text(
j,
i,
iterations[i, j],
ha="center",
va="center",
color="w",
fontsize=20,
)
if make_cbar:
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
cbar_ax.set_ylabel("mean_test_score", rotation=-90, va="bottom", fontsize=15)
fig, axes = plt.subplots(ncols=2, sharey=True)
ax1, ax2 = axes
make_heatmap(ax1, gsh, is_sh=True)
make_heatmap(ax2, gs, make_cbar=True)
ax1.set_title("Successive Halving\ntime = {:.3f}s".format(gsh_time), fontsize=15)
ax2.set_title("GridSearch\ntime = {:.3f}s".format(gs_time), fontsize=15)
plt.show()
# %%
# The heatmaps show the mean test score of the parameter combinations for an
# :class:`~sklearn.svm.SVC` instance. The
# :class:`~sklearn.model_selection.HalvingGridSearchCV` also shows the
# iteration at which the combinations were last used. The combinations marked
# as ``0`` were only evaluated at the first iteration, while the ones with
# ``5`` are the parameter combinations that are considered the best ones.
#
# We can see that the :class:`~sklearn.model_selection.HalvingGridSearchCV`
# class is able to find parameter combinations that are just as accurate as
# :class:`~sklearn.model_selection.GridSearchCV`, in much less time.
|
"""
Comparison between grid search and successive halving
=====================================================
This example compares the parameter search performed by
:class:`~sklearn.model_selection.HalvingGridSearchCV` and
:class:`~sklearn.model_selection.GridSearchCV`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import GridSearchCV, HalvingGridSearchCV
from sklearn.svm import SVC
# %%
# We first define the parameter space for an :class:`~sklearn.svm.SVC`
# estimator, and compute the time required to train a
# :class:`~sklearn.model_selection.HalvingGridSearchCV` instance, as well as a
# :class:`~sklearn.model_selection.GridSearchCV` instance.
rng = np.random.RandomState(0)
X, y = datasets.make_classification(n_samples=1000, random_state=rng)
gammas = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]
Cs = [1, 10, 100, 1e3, 1e4, 1e5]
param_grid = {"gamma": gammas, "C": Cs}
clf = SVC(random_state=rng)
tic = time()
gsh = HalvingGridSearchCV(
estimator=clf, param_grid=param_grid, factor=2, random_state=rng
)
gsh.fit(X, y)
gsh_time = time() - tic
tic = time()
gs = GridSearchCV(estimator=clf, param_grid=param_grid)
gs.fit(X, y)
gs_time = time() - tic
# %%
# We now plot heatmaps for both search estimators.
def make_heatmap(ax, gs, is_sh=False, make_cbar=False):
"""Helper to make a heatmap."""
results = pd.DataFrame(gs.cv_results_)
results[["param_C", "param_gamma"]] = results[["param_C", "param_gamma"]].astype(
np.float64
)
if is_sh:
# SH dataframe: get mean_test_score values for the highest iter
scores_matrix = results.sort_values("iter").pivot_table(
index="param_gamma",
columns="param_C",
values="mean_test_score",
aggfunc="last",
)
else:
scores_matrix = results.pivot(
index="param_gamma", columns="param_C", values="mean_test_score"
)
im = ax.imshow(scores_matrix)
ax.set_xticks(np.arange(len(Cs)))
ax.set_xticklabels(["{:.0E}".format(x) for x in Cs])
ax.set_xlabel("C", fontsize=15)
ax.set_yticks(np.arange(len(gammas)))
ax.set_yticklabels(["{:.0E}".format(x) for x in gammas])
ax.set_ylabel("gamma", fontsize=15)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
if is_sh:
iterations = results.pivot_table(
index="param_gamma", columns="param_C", values="iter", aggfunc="max"
).values
for i in range(len(gammas)):
for j in range(len(Cs)):
ax.text(
j,
i,
iterations[i, j],
ha="center",
va="center",
color="w",
fontsize=20,
)
if make_cbar:
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
cbar_ax.set_ylabel("mean_test_score", rotation=-90, va="bottom", fontsize=15)
fig, axes = plt.subplots(ncols=2, sharey=True)
ax1, ax2 = axes
make_heatmap(ax1, gsh, is_sh=True)
make_heatmap(ax2, gs, make_cbar=True)
ax1.set_title("Successive Halving\ntime = {:.3f}s".format(gsh_time), fontsize=15)
ax2.set_title("GridSearch\ntime = {:.3f}s".format(gs_time), fontsize=15)
plt.show()
# %%
# The heatmaps show the mean test score of the parameter combinations for an
# :class:`~sklearn.svm.SVC` instance. The heatmap for
# :class:`~sklearn.model_selection.HalvingGridSearchCV` also shows the
# iteration at which the combinations were last used. The combinations marked
# as ``0`` were only evaluated at the first iteration, while the ones with
# ``5`` are the parameter combinations that are considered the best ones.
#
# We can see that the :class:`~sklearn.model_selection.HalvingGridSearchCV`
# class is able to find parameter combinations that are just as accurate as
# :class:`~sklearn.model_selection.GridSearchCV`, in much less time.
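# %%
# For reference, the halving search also records how candidates and resources
# evolved across iterations; ``n_candidates_`` and ``n_resources_`` are the
# documented scikit-learn attributes (shown here for illustration):
print("Candidates per iteration:", gsh.n_candidates_)
print("Resources per iteration:", gsh.n_resources_)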
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import SQLDatabase
from langchain_community.utilities.sql_database import truncate_word
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"truncate_word": "langchain_community.utilities.sql_database",
"SQLDatabase": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"SQLDatabase",
"truncate_word",
]
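# Usage sketch (illustrative; the exact import path of this shim module is an
# assumption): accessing either name on this module falls through to
# ``__getattr__`` above, which emits a deprecation warning and returns the
# object from its new ``langchain_community`` location, e.g.:
#
#   from langchain.utilities.sql_database import SQLDatabase  # deprecated path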
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.utilities import SQLDatabase
from langchain_community.utilities.sql_database import truncate_word
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"truncate_word": "langchain_community.utilities.sql_database",
"SQLDatabase": "langchain_community.utilities",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"truncate_word",
"SQLDatabase",
]
|
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from torchaudio._internal.module_utils import dropping_io_support, dropping_support
from ._effector import AudioEffector
from ._playback import play_audio as _play_audio
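# Wrap the public entry points below so that constructing or calling them
# emits the corresponding deprecation warning from torchaudio's internal
# ``dropping_io_support`` / ``dropping_support`` decorators.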
CodecConfig.__init__ = dropping_io_support(CodecConfig.__init__)
StreamReader.__init__ = dropping_io_support(StreamReader.__init__)
StreamWriter.__init__ = dropping_io_support(StreamWriter.__init__)
AudioEffector.__init__ = dropping_support(AudioEffector.__init__)
play_audio = dropping_io_support(_play_audio)
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
from torio.io import CodecConfig, StreamingMediaDecoder as StreamReader, StreamingMediaEncoder as StreamWriter
from torchaudio._internal.module_utils import dropping_support
from ._effector import AudioEffector
from ._playback import play_audio as _play_audio
CodecConfig.__init__ = dropping_support(CodecConfig.__init__)
StreamReader.__init__ = dropping_support(StreamReader.__init__)
StreamWriter.__init__ = dropping_support(StreamWriter.__init__)
AudioEffector.__init__ = dropping_support(AudioEffector.__init__)
play_audio = dropping_support(_play_audio)
__all__ = [
"AudioEffector",
"StreamReader",
"StreamWriter",
"CodecConfig",
"play_audio",
]
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
split: datasets.NamedSplit = datasets.Split.TRAIN
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
yield from enumerate(self.config.generator(**gen_kwargs))
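# Usage sketch (illustrative): this builder backs
# ``datasets.Dataset.from_generator``, which fills in ``GeneratorConfig``
# from a user-supplied callable, e.g.:
#
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#
#   ds = datasets.Dataset.from_generator(gen)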
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
split: datasets.NamedSplit = datasets.Split.TRAIN
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
|
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
VERSION = get_version("keras/src/version.py")
setup(
name="keras",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras",
author="Keras team",
author_email="keras-users@googlegroups.com",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"optree",
"ml-dtypes",
"packaging",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
|
"""Setup script."""
import os
import pathlib
from setuptools import find_packages
from setuptools import setup
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path)) as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
VERSION = get_version("keras/src/version.py")
setup(
name="keras",
description="Multi-backend Keras.",
long_description_content_type="text/markdown",
long_description=README,
version=VERSION,
url="https://github.com/keras-team/keras",
author="Keras team",
author_email="keras-users@googlegroups.com",
license="Apache License 2.0",
install_requires=[
"absl-py",
"numpy",
"rich",
"namex",
"h5py",
"optree",
"ml-dtypes",
],
# Supported Python versions
python_requires=">=3.9",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: Unix",
"Operating System :: MacOS",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Software Development",
],
packages=find_packages(exclude=("*_test.py",)),
)
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"MelSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
from ._multi_channel import MVDR, PSD, RTFMVDR, SoudenMVDR
from ._transforms import (
AmplitudeToDB,
BarkScale,
BarkSpectrogram,
ComputeDeltas,
Fade,
FrequencyMasking,
GriffinLim,
InverseBarkScale,
InverseMelScale,
InverseSpectrogram,
LFCC,
Loudness,
MelScale,
MelSpectrogram,
MFCC,
MuLawDecoding,
MuLawEncoding,
PitchShift,
Resample,
RNNTLoss,
SlidingWindowCmn,
SpectralCentroid,
Spectrogram,
TimeMasking,
TimeStretch,
Vad,
Vol,
)
__all__ = [
"AmplitudeToDB",
"ComputeDeltas",
"Fade",
"FrequencyMasking",
"GriffinLim",
"InverseMelScale",
"InverseBarkScale",
"InverseSpectrogram",
"LFCC",
"Loudness",
"MFCC",
"MVDR",
"MelScale",
"BarkScale",
"MelSpectrogram",
"BarkSpectrogram",
"MuLawDecoding",
"MuLawEncoding",
"PSD",
"PitchShift",
"RNNTLoss",
"RTFMVDR",
"Resample",
"SlidingWindowCmn",
"SoudenMVDR",
"SpectralCentroid",
"Spectrogram",
"TimeMasking",
"TimeStretch",
"Vad",
"Vol",
]
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
def test_early_stopping_callback_rejects_invalid_stopping_rounds_with_informative_errors():
with pytest.raises(TypeError, match="early_stopping_round should be an integer. Got 'str'"):
lgb.early_stopping(stopping_rounds="neverrrr")
@pytest.mark.parametrize("stopping_rounds", [-10, -1, 0])
def test_early_stopping_callback_accepts_non_positive_stopping_rounds(stopping_rounds):
cb = lgb.early_stopping(stopping_rounds=stopping_rounds)
assert cb.enabled is False
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {"bagging_fraction": [0.7] * 5 + [0.6] * 5, "feature_fraction": reset_feature_fraction}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
|
# coding: utf-8
import pytest
import lightgbm as lgb
from .utils import SERIALIZERS, pickle_and_unpickle_object
def reset_feature_fraction(boosting_round):
return 0.6 if boosting_round < 15 else 0.8
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_early_stopping_callback_is_picklable(serializer):
rounds = 5
callback = lgb.early_stopping(stopping_rounds=rounds)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 30
assert callback_from_disk.before_iteration is False
assert callback.stopping_rounds == callback_from_disk.stopping_rounds
assert callback.stopping_rounds == rounds
def test_early_stopping_callback_rejects_invalid_stopping_rounds_with_informative_errors():
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: 0"):
lgb.early_stopping(stopping_rounds=0)
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: -1"):
lgb.early_stopping(stopping_rounds=-1)
with pytest.raises(ValueError, match="stopping_rounds should be an integer and greater than 0. got: neverrrr"):
lgb.early_stopping(stopping_rounds="neverrrr")
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_log_evaluation_callback_is_picklable(serializer):
periods = 42
callback = lgb.log_evaluation(period=periods)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is False
assert callback.period == callback_from_disk.period
assert callback.period == periods
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_record_evaluation_callback_is_picklable(serializer):
results = {}
callback = lgb.record_evaluation(eval_result=results)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 20
assert callback_from_disk.before_iteration is False
assert callback.eval_result == callback_from_disk.eval_result
assert callback.eval_result is results
@pytest.mark.parametrize("serializer", SERIALIZERS)
def test_reset_parameter_callback_is_picklable(serializer):
params = {"bagging_fraction": [0.7] * 5 + [0.6] * 5, "feature_fraction": reset_feature_fraction}
callback = lgb.reset_parameter(**params)
callback_from_disk = pickle_and_unpickle_object(obj=callback, serializer=serializer)
assert callback_from_disk.order == 10
assert callback_from_disk.before_iteration is True
assert callback.kwargs == callback_from_disk.kwargs
assert callback.kwargs == params
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names as get_label_names
from keras.src.datasets.reuters import get_word_index as get_word_index
from keras.src.datasets.reuters import load_data as load_data
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names
from keras.src.datasets.reuters import get_word_index
from keras.src.datasets.reuters import load_data
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
import os
import platform
import sys
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Lint for undefined variables is disabled as config is not defined inside this
# file; instead, config is injected by way of evaluating runlit.cfg.py from
# runlit.site.cfg.py which in turn is evaluated by lit.py. The structure is
# common for lit tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR ' + os.path.basename(config.mlir_test_dir)
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.cc', '.hlo', '.json', '.mlir', '.pbtxt', '.py']
# test_source_root: The root path where tests are located.
config.test_source_root = config.mlir_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.environ['RUNFILES_DIR']
if platform.system() == 'Windows':
tool_patterns = [
ToolSubst('FileCheck.exe', unresolved='fatal'),
# Handle these specially as they are strings searched for during testing.
ToolSubst('count.exe', unresolved='fatal'),
ToolSubst('not.exe', unresolved='fatal')
]
llvm_config.config.substitutions.append(
('%python', '"%s"' % (sys.executable)))
llvm_config.add_tool_substitutions(tool_patterns,
[llvm_config.config.llvm_tools_dir])
else:
llvm_config.use_default_substitutions()
llvm_config.config.substitutions.append(
('%tfrt_bindir', 'tensorflow/compiler/aot'))
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = config.mlir_tf_tools_dirs + [
config.mlir_tools_dir, config.llvm_tools_dir
]
tool_names = [
'dtensor-opt',
'flatbuffer_to_string',
'flatbuffer_translate',
'hlo_to_kernel',
'json_to_flatbuffer',
'kernel-gen-opt',
'litert-opt',
'mlir-hlo-opt',
'mlir-opt',
'mlir-tflite-runner',
'mlir-translate',
'odml-to-stablehlo-opt',
'odml_to_stablehlo',
'odml-converter',
'stable-quant-opt',
'tac-opt-all-backends',
'tac-translate',
'tf-mlir-translate',
'tf-opt',
'tf-quant-opt',
'tf-reduce',
'tf-tfrt-opt',
'tf_tfjs_translate',
'tf_tfl_translate',
'tfcompile',
'tfg-opt-no-passes',
'tfg-transforms-opt',
'tfg-translate',
'tfjs-opt',
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
# pylint: enable=undefined-variable
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lit runner configuration."""
import os
import platform
import sys
import lit.formats
from lit.llvm import llvm_config
from lit.llvm.subst import ToolSubst
# Lint for undefined variables is disabled as config is not defined inside this
# file; instead, config is injected by way of evaluating runlit.cfg.py from
# runlit.site.cfg.py which in turn is evaluated by lit.py. The structure is
# common for lit tests and intended to only persist temporarily (b/136126535).
# pylint: disable=undefined-variable
# Configuration file for the 'lit' test runner.
# name: The name of this test suite.
config.name = 'MLIR ' + os.path.basename(config.mlir_test_dir)
config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell)
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.cc', '.hlo', '.json', '.mlir', '.pbtxt', '.py']
# test_source_root: The root path where tests are located.
config.test_source_root = config.mlir_test_dir
# test_exec_root: The root path where tests should be run.
config.test_exec_root = os.environ['RUNFILES_DIR']
if platform.system() == 'Windows':
tool_patterns = [
ToolSubst('FileCheck.exe', unresolved='fatal'),
# Handle these specially as they are strings searched for during testing.
ToolSubst('count.exe', unresolved='fatal'),
ToolSubst('not.exe', unresolved='fatal')
]
llvm_config.config.substitutions.append(
('%python', '"%s"' % (sys.executable)))
llvm_config.add_tool_substitutions(tool_patterns,
[llvm_config.config.llvm_tools_dir])
else:
llvm_config.use_default_substitutions()
llvm_config.config.substitutions.append(
('%tfrt_bindir', 'tensorflow/compiler/aot'))
# Tweak the PATH to include the tools dir.
llvm_config.with_environment('PATH', config.llvm_tools_dir, append_path=True)
tool_dirs = config.mlir_tf_tools_dirs + [
config.mlir_tools_dir, config.llvm_tools_dir
]
tool_names = [
'dtensor-opt',
'flatbuffer_to_string',
'flatbuffer_translate',
'hlo_to_kernel',
'json_to_flatbuffer',
'kernel-gen-opt',
'mlir-hlo-opt',
'mlir-opt',
'mlir-tflite-runner',
'mlir-translate',
'odml-to-stablehlo-opt',
'odml_to_stablehlo',
'odml-converter',
'stable-quant-opt',
'tac-opt-all-backends',
'tac-translate',
'tf-mlir-translate',
'tf-opt',
'tf-quant-opt',
'tf-reduce',
'tf-tfrt-opt',
'tf_tfjs_translate',
'tf_tfl_translate',
'tfcompile',
'tfg-opt-no-passes',
'tfg-transforms-opt',
'tfg-translate',
'tfjs-opt',
]
tools = [ToolSubst(s, unresolved='ignore') for s in tool_names]
llvm_config.add_tool_substitutions(tools, tool_dirs)
# pylint: enable=undefined-variable
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.visualizer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = BaseDataElement(
metainfo=dict(
scale=(15, 15), ori_height=5, ori_width=5, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15, img_path='tmp.jpg'))
pred_datasamples = [BaseDataElement()]
data_batch = [dict(inputs=inputs, data_sample=gt_datasamples)]
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import Mock
import torch
from mmengine.data import BaseDataElement
from mmengine.hooks import NaiveVisualizationHook
class TestNaiveVisualizationHook:
def test_after_train_iter(self):
naive_visualization_hook = NaiveVisualizationHook()
runner = Mock(iter=1)
runner.writer.add_image = Mock()
inputs = torch.randn(1, 3, 15, 15)
batch_idx = 10
# test with normalize, resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
img_norm_cfg=dict(
mean=(0, 0, 0), std=(0.5, 0.5, 0.5), to_bgr=True),
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg'))
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with resize, pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(10, 10),
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only resize
gt_datasamples = [
BaseDataElement(
metainfo=dict(
scale=(15, 15),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test with only pad
gt_datasamples = [
BaseDataElement(
metainfo=dict(
pad_shape=(15, 15, 3),
ori_height=5,
ori_width=5,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
# test no transform
gt_datasamples = [
BaseDataElement(
metainfo=dict(ori_height=15, ori_width=15,
img_path='tmp.jpg')),
]
pred_datasamples = [BaseDataElement()]
data_batch = (inputs, gt_datasamples)
naive_visualization_hook.after_test_iter(runner, batch_idx, data_batch,
pred_datasamples)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
               runner,
               data_batch: DATA_BATCH = None,
               outputs: Optional[Union[dict, Sequence[BaseDataSample]]] = None
               ) -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (dict or sequence, optional): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner) -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner) -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
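# Usage sketch (illustrative): once registered in ``HOOKS``, the hook can be
# enabled from a runner config, e.g.:
#
#   custom_hooks = [
#       dict(type='EmptyCacheHook', before_epoch=False, after_epoch=True)
#   ]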
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
import torch
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class EmptyCacheHook(Hook):
"""Releases all unoccupied cached GPU memory during the process of
training.
Args:
before_epoch (bool): Whether to release cache before an epoch. Defaults
to False.
after_epoch (bool): Whether to release cache after an epoch. Defaults
to True.
after_iter (bool): Whether to release cache after an iteration.
Defaults to False.
"""
priority = 'NORMAL'
def __init__(self,
before_epoch: bool = False,
after_epoch: bool = True,
after_iter: bool = False) -> None:
self._before_epoch = before_epoch
self._after_epoch = after_epoch
self._after_iter = after_iter
def after_iter(self,
runner,
data_batch: DATA_BATCH = None,
outputs: Optional[Sequence[BaseDataSample]] = None) -> None:
"""Empty cache after an iteration.
Args:
runner (Runner): The runner of the training process.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. Defaults to None.
outputs (Sequence[BaseDataSample]): Outputs from model.
Defaults to None.
"""
if self._after_iter:
torch.cuda.empty_cache()
def before_epoch(self, runner) -> None:
"""Empty cache before an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._before_epoch:
torch.cuda.empty_cache()
def after_epoch(self, runner) -> None:
"""Empty cache after an epoch.
Args:
runner (Runner): The runner of the training process.
"""
if self._after_epoch:
torch.cuda.empty_cache()
|
import functools
import os
import os.path
import pathlib
from collections.abc import Collection
from typing import Any, BinaryIO, Optional, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: list[str],
) -> dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> tuple[IterDataPipe, list[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[list[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: dict[str, Any]) -> dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> tuple[IterDataPipe, list[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
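# Usage sketch (illustrative), assuming an ImageFolder-style layout of
# ``root/<category>/<image files>``:
#
#   dp, categories = from_image_folder("~/datasets/pets")
#   sample = next(iter(dp))  # dict with "path", "image" and "label" keys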
|
import functools
import os
import os.path
import pathlib
from typing import Any, BinaryIO, Collection, Dict, List, Optional, Tuple, Union
from torchdata.datapipes.iter import FileLister, FileOpener, Filter, IterDataPipe, Mapper
from torchvision.prototype.datasets.utils import EncodedData, EncodedImage
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
__all__ = ["from_data_folder", "from_image_folder"]
def _is_not_top_level_file(path: str, *, root: pathlib.Path) -> bool:
rel_path = pathlib.Path(path).relative_to(root)
return rel_path.is_dir() or rel_path.parent != pathlib.Path(".")
def _prepare_sample(
data: Tuple[str, BinaryIO],
*,
root: pathlib.Path,
categories: List[str],
) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).relative_to(root).parts[0]
return dict(
path=path,
data=EncodedData.from_file(buffer),
label=Label.from_category(category, categories=categories),
)
def from_data_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Optional[Collection[str]] = None,
recursive: bool = True,
) -> Tuple[IterDataPipe, List[str]]:
root = pathlib.Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
masks: Union[List[str], str] = [f"*.{ext}" for ext in valid_extensions] if valid_extensions is not None else ""
dp = FileLister(str(root), recursive=recursive, masks=masks)
dp: IterDataPipe = Filter(dp, functools.partial(_is_not_top_level_file, root=root))
dp = hint_sharding(dp)
dp = hint_shuffling(dp)
dp = FileOpener(dp, mode="rb")
return Mapper(dp, functools.partial(_prepare_sample, root=root, categories=categories)), categories
def _data_to_image_key(sample: Dict[str, Any]) -> Dict[str, Any]:
sample["image"] = EncodedImage(sample.pop("data").data)
return sample
def from_image_folder(
root: Union[str, pathlib.Path],
*,
valid_extensions: Collection[str] = ("jpg", "jpeg", "png", "ppm", "bmp", "pgm", "tif", "tiff", "webp"),
**kwargs: Any,
) -> Tuple[IterDataPipe, List[str]]:
valid_extensions = [valid_extension for ext in valid_extensions for valid_extension in (ext.lower(), ext.upper())]
dp, categories = from_data_folder(root, valid_extensions=valid_extensions, **kwargs)
return Mapper(dp, _data_to_image_key), categories
|
__version__ = '0.13.34'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
__version__ = '0.13.33'
import os
from .document import Document
from .array import DocumentArray
from .dataclasses import dataclass, field
if 'DA_RICH_HANDLER' in os.environ:
from rich.traceback import install
install()
|
import os.path
from typing import Any, Callable, Optional, Tuple
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which
expect the class labels to be in the range `[0, C-1]`.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (string): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
Selects the corresponding subset; 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
|
import os.path
from typing import Any, Callable, Optional, Tuple
import numpy as np
from PIL import Image
from .utils import check_integrity, download_url, verify_str_arg
from .vision import VisionDataset
class SVHN(VisionDataset):
"""`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset.
Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset,
we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which
expect the class labels to be in the range `[0, C-1]`.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format.
Args:
root (string): Root directory of the dataset where the data is stored.
split (string): One of {'train', 'test', 'extra'}.
Selects the corresponding subset; 'extra' is the extra training set.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version, e.g. ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
split_list = {
"train": [
"http://ufldl.stanford.edu/housenumbers/train_32x32.mat",
"train_32x32.mat",
"e26dedcc434d2e4c54c9b2d4a06d8373",
],
"test": [
"http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"test_32x32.mat",
"eb5a983be6a315427106f1b164d9cef3",
],
"extra": [
"http://ufldl.stanford.edu/housenumbers/extra_32x32.mat",
"extra_32x32.mat",
"a93ce644f1a588dc4d68dda5feec44a7",
],
}
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self.split = verify_str_arg(split, "split", tuple(self.split_list.keys()))
self.url = self.split_list[split][0]
self.filename = self.split_list[split][1]
self.file_md5 = self.split_list[split][2]
if download:
self.download()
if not self._check_integrity():
raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
# import here rather than at top of file because this is
# an optional dependency for torchvision
import scipy.io as sio
# reading(loading) mat file as array
loaded_mat = sio.loadmat(os.path.join(self.root, self.filename))
self.data = loaded_mat["X"]
# loading from the .mat file gives an np.ndarray of type np.uint8
# converting to np.int64, so that we have a LongTensor after
# the conversion from the numpy array
# the squeeze is needed to obtain a 1D tensor
self.labels = loaded_mat["y"].astype(np.int64).squeeze()
# the svhn dataset assigns the class label "10" to the digit 0
# this makes it inconsistent with several loss functions
# which expect the class labels to be in the range [0, C-1]
np.place(self.labels, self.labels == 10, 0)
self.data = np.transpose(self.data, (3, 2, 0, 1))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], int(self.labels[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(np.transpose(img, (1, 2, 0)))
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
md5 = self.split_list[self.split][2]
fpath = os.path.join(root, self.filename)
return check_integrity(fpath, md5)
def download(self) -> None:
md5 = self.split_list[self.split][2]
download_url(self.url, self.root, self.filename, md5)
def extra_repr(self) -> str:
return "Split: {split}".format(**self.__dict__)
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(type='LoadImageFromFile', backend_args={{_base_.backend_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
_base_ = './fcos_r50-caffe_fpn_gn-head_1x_coco.py'
# dataset settings
train_pipeline = [
dict(
type='LoadImageFromFile',
file_client_args={{_base_.file_client_args}}),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='RandomChoiceResize',
scale=[(1333, 640), (1333, 800)],
keep_ratio=True),
dict(type='RandomFlip', prob=0.5),
dict(type='PackDetInputs')
]
train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
# training schedule for 2x
max_epochs = 24
train_cfg = dict(max_epochs=max_epochs)
# learning rate
param_scheduler = [
dict(type='ConstantLR', factor=1.0 / 3, by_epoch=False, begin=0, end=500),
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[16, 22],
gamma=0.1)
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import (find_latest_checkpoint, get_test_pipeline_cfg,
update_data_root)
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType', 'get_test_pipeline_cfg'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .collect_env import collect_env
from .compat_config import compat_cfg
from .dist_utils import (all_reduce_dict, allreduce_grads, reduce_mean,
sync_random_seed)
from .logger import get_caller_name, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .setup_env import register_all_modules, setup_multi_processes
from .split_batch import split_batch
from .typing import (ConfigType, InstanceList, MultiConfig, OptConfigType,
OptInstanceList, OptMultiConfig, OptPixelList, PixelList,
RangeType)
__all__ = [
'collect_env', 'find_latest_checkpoint', 'update_data_root',
'setup_multi_processes', 'get_caller_name', 'log_img_scale', 'compat_cfg',
'split_batch', 'register_all_modules', 'replace_cfg_vals', 'AvoidOOM',
'AvoidCUDAOOM', 'all_reduce_dict', 'allreduce_grads', 'reduce_mean',
'sync_random_seed', 'ConfigType', 'InstanceList', 'MultiConfig',
'OptConfigType', 'OptInstanceList', 'OptMultiConfig', 'OptPixelList',
'PixelList', 'RangeType'
]
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from torchvision.prototype.tv_tensors import Label
from torchvision.tv_tensors import Image
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
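# Usage sketch (illustrative; prototype datasets are iterable datapipes):
#
#   dataset = PCAM("~/datasets", split="val")
#   sample = next(iter(dataset))  # dict with "image" and "label" keys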
|
import io
import pathlib
from collections import namedtuple
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
from torchdata.datapipes.iter import IterDataPipe, Mapper, Zipper
from torchvision.datapoints import Image
from torchvision.prototype.datapoints import Label
from torchvision.prototype.datasets.utils import Dataset, GDriveResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling
from .._api import register_dataset, register_info
NAME = "pcam"
class PCAMH5Reader(IterDataPipe[Tuple[str, io.IOBase]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[str, io.IOBase]],
key: Optional[str] = None, # Note: this key thing might be very specific to the PCAM dataset
) -> None:
self.datapipe = datapipe
self.key = key
def __iter__(self) -> Iterator[Tuple[str, io.IOBase]]:
import h5py
for _, handle in self.datapipe:
try:
with h5py.File(handle) as data:
if self.key is not None:
data = data[self.key]
yield from data
finally:
handle.close()
_Resource = namedtuple("_Resource", ("file_name", "gdrive_id", "sha256"))
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=["0", "1"])
@register_dataset(NAME)
class PCAM(Dataset):
# TODO write proper docstring
"""PCAM Dataset
homepage="https://github.com/basveeling/pcam"
"""
def __init__(
self, root: Union[str, pathlib.Path], split: str = "train", *, skip_integrity_check: bool = False
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "val", "test"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("h5py",))
_RESOURCES = {
"train": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_train_x.h5.gz",
gdrive_id="1Ka0XfEMiwgCYPdTI-vv6eUElOBnKFKQ2",
sha256="d619e741468a7ab35c7e4a75e6821b7e7e6c9411705d45708f2a0efc8960656c",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_train_y.h5.gz",
gdrive_id="1269yhu3pZDP8UYFQs-NYs3FPwuK-nGSG",
sha256="b74126d2c01b20d3661f9b46765d29cf4e4fba6faba29c8e0d09d406331ab75a",
),
),
"test": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_test_x.h5.gz",
gdrive_id="1qV65ZqZvWzuIVthK8eVDhIwrbnsJdbg_",
sha256="79174c2201ad521602a5888be8f36ee10875f37403dd3f2086caf2182ef87245",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_test_y.h5.gz",
gdrive_id="17BHrSrwWKjYsOgTMmoqrIjDy6Fa2o_gP",
sha256="0a522005fccc8bbd04c5a117bfaf81d8da2676f03a29d7499f71d0a0bd6068ef",
),
),
"val": (
_Resource( # Images
file_name="camelyonpatch_level_2_split_valid_x.h5.gz",
gdrive_id="1hgshYGWK8V-eGRy8LToWJJgDU_rXWVJ3",
sha256="f82ee1670d027b4ec388048d9eabc2186b77c009655dae76d624c0ecb053ccb2",
),
_Resource( # Targets
file_name="camelyonpatch_level_2_split_valid_y.h5.gz",
gdrive_id="1bH8ZRbhSVAhScTS0p9-ZzGnX91cHT3uO",
sha256="ce1ae30f08feb468447971cfd0472e7becd0ad96d877c64120c72571439ae48c",
),
),
}
def _resources(self) -> List[OnlineResource]:
return [ # = [images resource, targets resource]
GDriveResource(file_name=file_name, id=gdrive_id, sha256=sha256, preprocess="decompress")
for file_name, gdrive_id, sha256 in self._RESOURCES[self._split]
]
def _prepare_sample(self, data: Tuple[Any, Any]) -> Dict[str, Any]:
image, target = data # They're both numpy arrays at this point
return {
"image": Image(image.transpose(2, 0, 1)),
"label": Label(target.item(), categories=self._categories),
}
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
images_dp, targets_dp = resource_dps
images_dp = PCAMH5Reader(images_dp, key="x")
targets_dp = PCAMH5Reader(targets_dp, key="y")
dp = Zipper(images_dp, targets_dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return 262_144 if self._split == "train" else 32_768
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
def __init__(self, guid: str = "", texts: list[str] | None = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
"""
This file contains deprecated code that can only be used with the old `model.fit`-style Sentence Transformers v2.X training.
It exists for backwards compatibility with the `model.old_fit` method, but will be removed in a future version.
Nowadays, with Sentence Transformers v3+, it is recommended to use the `SentenceTransformerTrainer` class to train models.
See https://www.sbert.net/docs/sentence_transformer/training_overview.html for more information.
Instead of `InputExample`, you should create a `datasets` `Dataset` for training: https://huggingface.co/docs/datasets/create_dataset
"""
from __future__ import annotations
class InputExample:
"""Structure for one input example with texts, the label and a unique id"""
    def __init__(self, guid: str = "", texts: list[str] | None = None, label: int | float = 0):
"""
Creates one InputExample with the given texts, guid and label
Args:
guid: id for the example
texts: the texts for the example.
label: the label for the example
"""
self.guid = guid
self.texts = texts
self.label = label
def __str__(self):
return "<InputExample> label: {}, texts: {}".format(str(self.label), "; ".join(self.texts))
|
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
"""[BETA] See :class:`~torchvision.transforms.v2.UniformTemporalSubsample` for details."""
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
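# Quick sanity check (illustrative addition): subsampling a random 16-frame clip down
# to 4 frames selects indices linspace(0, 15, 4) -> [0, 5, 10, 15] along the time dim.
if __name__ == "__main__":
    clip = torch.rand(16, 3, 32, 32)  # (T, C, H, W); time is dimension -4
    out = uniform_temporal_subsample(clip, num_samples=4)
    assert out.shape == (4, 3, 32, 32)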
|
import torch
from torchvision import datapoints
from torchvision.utils import _log_api_usage_once
from ._utils import _get_kernel, _register_kernel_internal
def uniform_temporal_subsample(inpt: torch.Tensor, num_samples: int) -> torch.Tensor:
if torch.jit.is_scripting():
return uniform_temporal_subsample_video(inpt, num_samples=num_samples)
_log_api_usage_once(uniform_temporal_subsample)
kernel = _get_kernel(uniform_temporal_subsample, type(inpt))
return kernel(inpt, num_samples=num_samples)
@_register_kernel_internal(uniform_temporal_subsample, torch.Tensor)
@_register_kernel_internal(uniform_temporal_subsample, datapoints.Video)
def uniform_temporal_subsample_video(video: torch.Tensor, num_samples: int) -> torch.Tensor:
# Reference: https://github.com/facebookresearch/pytorchvideo/blob/a0a131e/pytorchvideo/transforms/functional.py#L19
t_max = video.shape[-4] - 1
indices = torch.linspace(0, t_max, num_samples, device=video.device).long()
return torch.index_select(video, -4, indices)
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.registry import MODELS
from .single_stage import SingleStageDetector
@MODELS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
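# Illustrative (hypothetical config values; the real ones live under configs/paa/):
# registered detectors are normally built through the registry rather than directly,
#
#     from mmdet.registry import MODELS
#     model = MODELS.build(dict(type='PAA', backbone=..., neck=..., bbox_head=...))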
|
# Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=8,  # trained with 32 GPUs, i.e. a total batch size of 256
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
train=dict(
_delete_=True,
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/oidv6-train-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/train/',
label_file=data_root +
'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=20000,
warmup_ratio=0.001,
step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
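# With auto scaling enabled, the runner rescales the LR linearly:
# lr = 0.04 * (num_gpus * samples_per_gpu) / base_batch_size, so the intended
# 32 GPUs x 8 samples/GPU setup keeps lr at 0.04 (scaling factor 1).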
|
_base_ = [
'../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
bbox_head=dict(
num_classes=601,
anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=8,  # trained with 32 GPUs, i.e. a total batch size of 256
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
train=dict(
_delete_=True,
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root +
'annotations/oidv6-train-annotations-bbox.csv',
img_prefix=data_root + 'OpenImages/train/',
label_file=data_root +
'annotations/class-descriptions-boxable.csv',
hierarchy_file=data_root +
'annotations/bbox_labels_600_hierarchy.json',
pipeline=train_pipeline)),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=20000,
warmup_ratio=0.001,
step=[8, 11])
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Dict, Iterable, Optional
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
super().__init__()
self.evaluators = evaluators
self.main_score_function = main_score_function
def __call__(
        self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1
) -> Dict[str, float]:
evaluations = []
scores = []
for evaluator_idx, evaluator in enumerate(self.evaluators):
evaluation = evaluator(model, output_path, epoch, steps)
if not isinstance(evaluation, dict):
scores.append(evaluation)
evaluation = {f"evaluator_{evaluator_idx}": evaluation}
else:
if hasattr(evaluation, "primary_metric"):
scores.append(evaluation[evaluation.primary_metric])
else:
scores.append(evaluation[list(evaluation.keys())[0]])
evaluations.append(evaluation)
self.primary_metric = "sequential_score"
main_score = self.main_score_function(scores)
results = {key: value for evaluation in evaluations for key, value in evaluation.items()}
results["sequential_score"] = main_score
return results
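# Usage sketch (illustrative; the sub-evaluators and model are assumed to exist):
#
#     seq_evaluator = SequentialEvaluator(
#         [dev_evaluator, test_evaluator],
#         main_score_function=lambda scores: sum(scores) / len(scores),
#     )
#     results = seq_evaluator(model)   # dict with every sub-evaluator's metrics
#     results["sequential_score"]      # the aggregated main score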
|
from sentence_transformers import SentenceTransformer
from . import SentenceEvaluator
from typing import Iterable, Optional
class SequentialEvaluator(SentenceEvaluator):
"""
    This evaluator allows multiple sub-evaluators to be passed. When the model is evaluated,
    the data is passed sequentially to all sub-evaluators.
    All scores are passed to 'main_score_function', which derives one final score value.
"""
def __init__(self, evaluators: Iterable[SentenceEvaluator], main_score_function=lambda scores: scores[-1]):
self.evaluators = evaluators
self.main_score_function = main_score_function
    def __call__(self, model: SentenceTransformer, output_path: Optional[str] = None, epoch: int = -1, steps: int = -1) -> float:
scores = []
for evaluator in self.evaluators:
scores.append(evaluator(model, output_path, epoch, steps))
return self.main_score_function(scores)
|
from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
    If the output signals that an action should be taken,
    it should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
    If the output signals that a final answer should be given,
    it should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
if not any(follow in last_line for follow in self.followups):
if self.finish_string not in last_line:
msg = f"Could not parse output: {text}"
raise OutputParserException(msg)
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
|
from collections.abc import Sequence
from typing import Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
    If the output signals that an action should be taken,
    it should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
    If the output signals that a final answer should be given,
    it should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
if not any([follow in last_line for follow in self.followups]):
if self.finish_string not in last_line:
msg = f"Could not parse output: {text}"
raise OutputParserException(msg)
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
|
import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement this abstract class in-case you want to build a fastapi-based Gateway by
implementing the `app` property. This property should return a fastapi app. The base Gateway will handle starting
a server and serving the application using that server."""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: bool = False,
**kwargs
):
"""Initialize the FastAPIBaseGateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.uvicorn_kwargs = uvicorn_kwargs or {}
if ssl_keyfile and 'ssl_keyfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile and 'ssl_certfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_certfile'] = ssl_certfile
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
@property
@abstractmethod
def app(self):
        """Get a FastAPI app"""
...
async def setup_server(self):
"""
        Initialize the uvicorn server that serves the FastAPI app
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
        # the app property generates a new FastAPI app each time it is called
app = self.app
_install_health_check(app, self.logger)
self.server = UviServer(
config=Config(
app=app,
host=self.host,
port=self.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**self.uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""
Free resources allocated when setting up HTTP server
"""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run HTTP server forever"""
await self.server.serve()
def _install_health_check(app: 'FastAPI', logger):
health_check_exists = False
for route in app.routes:
        if getattr(route, 'path', None) == '/' and 'GET' in getattr(
            route, 'methods', ()
        ):
health_check_exists = True
logger.warning(
'endpoint GET on "/" is used for health checks, make sure it\'s still accessible'
)
if not health_check_exists:
from jina.serve.runtimes.gateway.http.models import JinaHealthModel
@app.get(
path='/',
summary='Get the health of Jina Gateway service',
response_model=JinaHealthModel,
)
async def _gateway_health():
"""
Get the health of this Gateway service.
.. # noqa: DAR201
"""
return {}
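# Minimal subclass sketch (illustrative; names are made up): only the `app` property
# needs to be implemented, the base class handles serving it with uvicorn.
#
#     from fastapi import FastAPI
#
#     class MyGateway(FastAPIBaseGateway):
#         @property
#         def app(self):
#             app = FastAPI(title='Custom Gateway')
#
#             @app.get('/ping')
#             async def ping():
#                 return {'pong': True}
#
#             return app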
|
import logging
import os
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from jina.importer import ImportExtensions
from jina.serve.gateway import BaseGateway
if TYPE_CHECKING:
from fastapi import FastAPI
class FastAPIBaseGateway(BaseGateway):
"""Base FastAPI gateway. Implement this abstract class in-case you want to build a fastapi-based Gateway by
implementing the `app` property. This property should return a fastapi app. The base Gateway will handle starting
a server and serving the application using that server."""
def __init__(
self,
ssl_keyfile: Optional[str] = None,
ssl_certfile: Optional[str] = None,
uvicorn_kwargs: Optional[dict] = None,
proxy: bool = False,
**kwargs
):
"""Initialize the FastAPIBaseGateway
:param ssl_keyfile: the path to the key file
:param ssl_certfile: the path to the certificate file
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
:param proxy: If set, respect the http_proxy and https_proxy environment variables, otherwise, it will unset
these proxy variables before start. gRPC seems to prefer no proxy
:param kwargs: keyword args
"""
super().__init__(**kwargs)
self.uvicorn_kwargs = uvicorn_kwargs or {}
if ssl_keyfile and 'ssl_keyfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile and 'ssl_certfile' not in self.uvicorn_kwargs.keys():
self.uvicorn_kwargs['ssl_certfile'] = ssl_certfile
if not proxy and os.name != 'nt':
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
@property
@abstractmethod
def app(self):
        """Get a FastAPI app"""
...
async def setup_server(self):
"""
        Initialize the uvicorn server that serves the FastAPI app
"""
with ImportExtensions(required=True):
from uvicorn import Config, Server
class UviServer(Server):
"""The uvicorn server."""
async def setup(self, sockets=None):
"""
Setup uvicorn server.
:param sockets: sockets of server.
"""
config = self.config
if not config.loaded:
config.load()
self.lifespan = config.lifespan_class(config)
await self.startup(sockets=sockets)
if self.should_exit:
return
async def serve(self, **kwargs):
"""
Start the server.
:param kwargs: keyword arguments
"""
await self.main_loop()
if 'CICD_JINA_DISABLE_HEALTHCHECK_LOGS' in os.environ:
class _EndpointFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
# NOTE: space is important after `GET /`, else all logs will be disabled.
return record.getMessage().find("GET / ") == -1
# Filter out healthcheck endpoint `GET /`
logging.getLogger("uvicorn.access").addFilter(_EndpointFilter())
        # the app property generates a new FastAPI app each time it is called
app = self.app
_install_health_check(app, self.logger)
self.server = UviServer(
config=Config(
app=app,
host=self.host,
port=self.port,
log_level=os.getenv('JINA_LOG_LEVEL', 'error').lower(),
**self.uvicorn_kwargs,
)
)
await self.server.setup()
async def shutdown(self):
"""
Free resources allocated when setting up HTTP server
"""
self.server.should_exit = True
await self.server.shutdown()
async def run_server(self):
"""Run HTTP server forever"""
await self.server.serve()
def _install_health_check(app: 'FastAPI', logger):
health_check_exists = False
for route in app.routes:
        if getattr(route, 'path', None) == '/' and 'GET' in getattr(
            route, 'methods', ()
        ):
health_check_exists = True
logger.warning(
'endpoint GET on "/" is used for health checks, make sure it\'s still accessible'
)
if not health_check_exists:
@app.get('/')
def health_check():
return {}
|
import inspect
import re
import sys
from collections import defaultdict
from jina import Document
all_meth = defaultdict(list)
for f in inspect.getmembers(Document):
if (
callable(f[1])
and not f[1].__name__.startswith('_')
and not f[0].startswith('_')
):
if 'return' in inspect.getfullargspec(f[1]).annotations and str(
inspect.getfullargspec(f[1]).annotations['return']
) in ('~T', 'T'):
module_name = f[1].__qualname__.split('.')[0].replace('Mixin', '')
desc = inspect.getdoc(
vars(sys.modules[f[1].__module__])[f[1].__qualname__.split('.')[0]]
)
all_meth[
(
module_name,
desc.strip()
.replace(':class:', '{class}')
.replace(':attr:', '{attr}'),
)
].append(f'{{meth}}`~{f[1].__module__}.{f[1].__qualname__}`')
all_s = []
for k, v in all_meth.items():
all_s.append(f'### {k[0].strip()}')
all_s.append(f'{k[1].strip()}')
for vv in v:
all_s.append(f'- {vv}')
all_s.append('\n')
doc_md = '../docs/fundamentals/document/fluent-interface.md'
text = '\n'.join(all_s)
with open(doc_md, encoding='utf-8') as fp:
_old = fp.read()
_new = re.sub(
r'(<!-- fluent-interface-start -->\s*?\n).*(\n\s*?<!-- fluent-interface-end -->)',
rf'\g<1>{text}\g<2>',
_old,
flags=re.DOTALL,
)
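# The substitution above replaces everything between the markers
# `<!-- fluent-interface-start -->` and `<!-- fluent-interface-end -->` with the
# freshly generated method list, keeping the markers themselves in place.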
with open(doc_md, 'w', encoding='utf-8') as fp:
fp.write(_new)
|
import inspect
import re
import sys
from collections import defaultdict
from jina import Document
all_meth = defaultdict(list)
for f in inspect.getmembers(Document):
if (
callable(f[1])
and not f[1].__name__.startswith('_')
and not f[0].startswith('_')
):
if 'return' in inspect.getfullargspec(f[1]).annotations and str(
inspect.getfullargspec(f[1]).annotations['return']
) in ('~T', 'T'):
module_name = f[1].__qualname__.split('.')[0].replace('Mixin', '')
desc = inspect.getdoc(
vars(sys.modules[f[1].__module__])[f[1].__qualname__.split('.')[0]]
)
all_meth[
(
module_name,
desc.strip()
.replace(':class:', '{class}')
.replace(':attr:', '{attr}'),
)
].append(f'{{meth}}`~{f[1].__module__}.{f[1].__qualname__}`')
all_s = []
for k, v in all_meth.items():
all_s.append(f'### {k[0].strip()}')
all_s.append(f'{k[1].strip()}')
for vv in v:
all_s.append(f'- {vv}')
all_s.append('\n')
doc_md = '../docs/fundamentals/document/fluent-interface.md'
text = '\n'.join(all_s)
with open(doc_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!-- fluent-interface-start -->\s*?\n).*(\n\s*?<!-- fluent-interface-end -->)',
rf'\g<1>{text}\g<2>',
_old,
flags=re.DOTALL,
)
with open(doc_md, 'w') as fp:
fp.write(_new)
|
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import conv_tasnet_base, ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
wavlm_base,
wavlm_large,
wavlm_model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"conv_tasnet_base",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wavlm_model",
"wavlm_base",
"wavlm_large",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
"HDemucs",
"hdemucs_low",
"hdemucs_medium",
"hdemucs_high",
]
|
from ._hdemucs import HDemucs, hdemucs_high, hdemucs_low, hdemucs_medium
from .conformer import Conformer
from .conv_tasnet import conv_tasnet_base, ConvTasNet
from .deepspeech import DeepSpeech
from .emformer import Emformer
from .rnnt import emformer_rnnt_base, emformer_rnnt_model, RNNT
from .rnnt_decoder import Hypothesis, RNNTBeamSearch
from .tacotron2 import Tacotron2
from .wav2letter import Wav2Letter
from .wav2vec2 import (
hubert_base,
hubert_large,
hubert_pretrain_base,
hubert_pretrain_large,
hubert_pretrain_model,
hubert_pretrain_xlarge,
hubert_xlarge,
HuBERTPretrainModel,
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
wav2vec2_model,
Wav2Vec2Model,
)
from .wavernn import WaveRNN
__all__ = [
"Wav2Letter",
"WaveRNN",
"ConvTasNet",
"conv_tasnet_base",
"DeepSpeech",
"Wav2Vec2Model",
"HuBERTPretrainModel",
"wav2vec2_model",
"wav2vec2_base",
"wav2vec2_large",
"wav2vec2_large_lv60k",
"hubert_base",
"hubert_large",
"hubert_xlarge",
"hubert_pretrain_model",
"hubert_pretrain_base",
"hubert_pretrain_large",
"hubert_pretrain_xlarge",
"Tacotron2",
"Conformer",
"Emformer",
"Hypothesis",
"RNNT",
"RNNTBeamSearch",
"emformer_rnnt_base",
"emformer_rnnt_model",
"HDemucs",
"hdemucs_low",
"hdemucs_medium",
"hdemucs_high",
]
|
"""Argparser module for Pod runtimes"""
import argparse
from dataclasses import dataclass
from typing import Dict
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
@dataclass
class PodTypeParams:
"""Data Class representing possible parameters for each pod type"""
runtime_cls: str
role_type: PodRoleType
POD_PARAMS_MAPPING: Dict[str, PodTypeParams] = {
'worker': PodTypeParams(runtime_cls='WorkerRuntime', role_type=PodRoleType.WORKER),
'head': PodTypeParams(runtime_cls='HeadRuntime', role_type=PodRoleType.HEAD),
'gateway': PodTypeParams(
runtime_cls='GatewayRuntime', role_type=PodRoleType.GATEWAY
),
}
def mixin_pod_parser(parser, pod_type: str = 'worker'):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
:param pod_type: the pod_type configured by the parser. Can be either 'worker' for WorkerRuntime or 'gateway' for GatewayRuntime
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default=POD_PARAMS_MAPPING[pod_type].runtime_cls,
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
        help='The timeout in milliseconds that a Pod waits for the runtime to be ready, -1 to wait '
        'forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
# hidden CLI used for internal only
gp.add_argument(
'--shard-id',
type=int,
default=0,
        help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=POD_PARAMS_MAPPING[pod_type].role_type,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
'`wait_start_success` at outer function for the postpone check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=int,
default=helper.random_port(),
help='The port for input data to bind to, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
        help='The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
        help='Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
help='If set, the current Pod/Deployment can not be further chained, '
'and the next `.add()` will chain after the last Pod/Deployment not this current one.',
)
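# Illustrative self-check (added): the mixin can be exercised on a bare parser.
if __name__ == '__main__':
    _parser = argparse.ArgumentParser()
    mixin_pod_parser(_parser, pod_type='worker')
    _args = _parser.parse_args(['--shards', '2', '--replicas', '3'])
    assert _args.runtime_cls == 'WorkerRuntime'
    assert _args.shards == 2 and _args.replicas == 3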
|
"""Argparser module for Pod runtimes"""
import argparse
from jina import helper
from jina.enums import PodRoleType
from jina.parsers.helper import _SHOW_ALL_ARGS, KVAppendAction, add_arg_group
def mixin_pod_parser(parser):
"""Mixing in arguments required by :class:`Pod` into the given parser.
:param parser: the parser instance to which we add arguments
"""
gp = add_arg_group(parser, title='Pod')
gp.add_argument(
'--runtime-cls',
type=str,
default='WorkerRuntime',
help='The runtime class to run inside the Pod',
)
gp.add_argument(
'--timeout-ready',
type=int,
default=600000,
        help='The timeout in milliseconds that a Pod waits for the runtime to be ready, -1 to wait '
        'forever',
)
gp.add_argument(
'--env',
action=KVAppendAction,
metavar='KEY: VALUE',
nargs='*',
help='The map of environment variables that are available inside runtime',
)
# hidden CLI used for internal only
gp.add_argument(
'--shard-id',
type=int,
default=0,
        help='defines the shard identifier for the executor. It is used as suffix for the workspace path of the executor'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--pod-role',
type=PodRoleType.from_string,
choices=list(PodRoleType),
default=PodRoleType.WORKER,
help='The role of this Pod in a Deployment'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--noblock-on-start',
action='store_true',
default=False,
help='If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
'`wait_start_success` at outer function for the postpone check.'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
gp.add_argument(
'--shards',
type=int,
default=1,
help='The number of shards in the deployment running at the same time. For more details check '
'https://docs.jina.ai/fundamentals/flow/create-flow/#complex-flow-topologies',
)
gp.add_argument(
'--replicas',
type=int,
default=1,
help='The number of replicas in the deployment',
)
gp.add_argument(
'--port',
type=int,
default=helper.random_port(),
help='The port for input data to bind to, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--monitoring',
action='store_true',
default=False,
help='If set, spawn an http server with a prometheus endpoint to expose metrics',
)
gp.add_argument(
'--port-monitoring',
type=str,
default=str(helper.random_port()),
dest='port_monitoring',
        help='The port on which the prometheus server is exposed, default is a random port between [49152, 65535]',
)
gp.add_argument(
'--retries',
type=int,
default=-1,
dest='retries',
        help='Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)',
)
gp.add_argument(
'--floating',
action='store_true',
default=False,
help='If set, the current Pod/Deployment can not be further chained, '
'and the next `.add()` will chain after the last Pod/Deployment not this current one.',
)
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List
from pycocotools.coco import COCO
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
def convert_phrase_ids(phrase_ids: list) -> list:
unique_elements = sorted(set(phrase_ids))
element_to_new_label = {
element: label
for label, element in enumerate(unique_elements)
}
phrase_ids = [element_to_new_label[element] for element in phrase_ids]
return phrase_ids
@DATASETS.register_module()
class Flickr30kDataset(BaseDetDataset):
"""Flickr30K Dataset."""
def load_data_list(self) -> List[dict]:
self.coco = COCO(self.ann_file)
self.ids = sorted(list(self.coco.imgs.keys()))
data_list = []
for img_id in self.ids:
if isinstance(img_id, str):
ann_ids = self.coco.getAnnIds(imgIds=[img_id], iscrowd=None)
else:
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
coco_img = self.coco.loadImgs(img_id)[0]
caption = coco_img['caption']
file_name = coco_img['file_name']
img_path = osp.join(self.data_prefix['img'], file_name)
width = coco_img['width']
height = coco_img['height']
tokens_positive = coco_img['tokens_positive_eval']
phrases = [caption[i[0][0]:i[0][1]] for i in tokens_positive]
phrase_ids = []
instances = []
annos = self.coco.loadAnns(ann_ids)
for anno in annos:
instance = {
'bbox': [
anno['bbox'][0], anno['bbox'][1],
anno['bbox'][0] + anno['bbox'][2],
anno['bbox'][1] + anno['bbox'][3]
],
'bbox_label':
anno['category_id'],
'ignore_flag':
anno['iscrowd']
}
phrase_ids.append(anno['phrase_ids'])
instances.append(instance)
phrase_ids = convert_phrase_ids(phrase_ids)
data_list.append(
dict(
img_path=img_path,
img_id=img_id,
height=height,
width=width,
instances=instances,
text=caption,
phrase_ids=phrase_ids,
tokens_positive=tokens_positive,
phrases=phrases,
))
return data_list
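# Worked example (added): convert_phrase_ids densifies arbitrary ids into consecutive
# labels ordered by the sorted unique ids, e.g. [7, 3, 7, 11] -> [1, 0, 1, 2].
if __name__ == '__main__':
    assert convert_phrase_ids([7, 3, 7, 11]) == [1, 0, 1, 2]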
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from typing import List
from pycocotools.coco import COCO
from mmdet.registry import DATASETS
from .base_det_dataset import BaseDetDataset
@DATASETS.register_module()
class Flickr30kDataset(BaseDetDataset):
"""Flickr30K Dataset."""
def convert_phrase_ids(self, a):
unique_elements = sorted(set(a))
element_to_new_label = {
element: label
for label, element in enumerate(unique_elements)
}
        discretized_a = [element_to_new_label[element] for element in a]
        return discretized_a
def load_data_list(self) -> List[dict]:
self.coco = COCO(self.ann_file)
self.ids = sorted(list(self.coco.imgs.keys()))
data_list = []
for img_id in self.ids:
if isinstance(img_id, str):
ann_ids = self.coco.getAnnIds(imgIds=[img_id], iscrowd=None)
else:
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=None)
coco_img = self.coco.loadImgs(img_id)[0]
caption = coco_img['caption']
file_name = coco_img['file_name']
img_path = osp.join(self.data_prefix['img'], file_name)
width = coco_img['width']
height = coco_img['height']
tokens_positive = coco_img['tokens_positive_eval']
phrases = [caption[i[0][0]:i[0][1]] for i in tokens_positive]
phrase_ids = []
instances = []
annos = self.coco.loadAnns(ann_ids)
for anno in annos:
instance = {}
instance['bbox'] = [
anno['bbox'][0], anno['bbox'][1],
anno['bbox'][0] + anno['bbox'][2],
anno['bbox'][1] + anno['bbox'][3]
]
instance['bbox_label'] = anno['category_id']
instance['ignore_flag'] = anno['iscrowd']
phrase_ids.append(anno['phrase_ids'])
instances.append(instance)
phrase_ids = self.convert_phrase_ids(phrase_ids)
data_list.append(
dict(
img_path=img_path,
img_id=img_id,
height=height,
width=width,
instances=instances,
text=caption,
phrase_ids=phrase_ids,
tokens_positive=tokens_positive,
phrases=phrases,
))
return data_list
|
from typing import Any, Literal, Optional
import pytest
import re
import respx
import json
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://integrate.api.nvidia.com/v1/models").respond(
json={
"data": [
{
"id": "mock-model",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
}
]
}
)
@pytest.fixture()
def mock_v1_ranking(respx_mock: respx.MockRouter) -> None:
respx_mock.post(
re.compile(r"https://ai\.api\.nvidia\.com/v1/.*/reranking")
).respond(
json={
"rankings": [
{"index": 0, "logit": 4.2},
]
}
)
@pytest.fixture()
def mock(mock_v1_models: None, mock_v1_ranking: None) -> None:
pass
@pytest.mark.parametrize(
"truncate",
[
None,
"END",
"NONE",
],
)
def test_truncate_passed(
mock: None,
respx_mock: respx.MockRouter,
truncate: Optional[Literal["END", "NONE"]],
) -> None:
client = NVIDIARerank(
api_key="BOGUS",
**({"truncate": truncate} if truncate else {}),
)
response = client.postprocess_nodes(
[NodeWithScore(node=Document(text="Nothing really."))],
query_str="What is it?",
)
assert len(response) == 1
assert len(respx.calls) > 0
last_call = list(respx.calls)[-1]
request_payload = json.loads(last_call.request.content.decode("utf-8"))
if truncate is None:
assert "truncate" not in request_payload
else:
assert "truncate" in request_payload
assert request_payload["truncate"] == truncate
@pytest.mark.parametrize("truncate", [True, False, 1, 0, 1.0, "START", "BOGUS"])
def test_truncate_invalid(truncate: Any) -> None:
with pytest.raises(ValueError):
NVIDIARerank(truncate=truncate)
@pytest.mark.integration
@pytest.mark.parametrize("truncate", ["END"])
def test_truncate_positive(model: str, mode: dict, truncate: str) -> None:
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(model=model, top_n=len(nodes), truncate=truncate, **mode)
response = client.postprocess_nodes(nodes, query_str=query)
print(response)
assert len(response) == len(nodes)
@pytest.mark.integration
@pytest.mark.parametrize("truncate", [None, "NONE"])
def test_truncate_negative(model: str, mode: dict, truncate: str) -> None:
if model == "nv-rerank-qa-mistral-4b:1":
pytest.skip(
"truncation is inconsistent across models, "
"nv-rerank-qa-mistral-4b:1 truncates by default "
"while others do not"
)
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(
model=model, **mode, **({"truncate": truncate} if truncate else {})
)
with pytest.raises(Exception) as e:
client.postprocess_nodes(nodes, query_str=query)
assert "400" in str(e.value)
# assert "exceeds maximum allowed" in str(e.value)
|
from typing import Any, Literal, Optional
import pytest
import re
import respx
import json
from llama_index.postprocessor.nvidia_rerank import NVIDIARerank
from llama_index.core.schema import NodeWithScore, Document
@pytest.fixture()
def mock_v1_models(respx_mock: respx.MockRouter) -> None:
respx_mock.get("https://integrate.api.nvidia.com/v1/models").respond(
json={
"data": [
{
"id": "mock-model",
"object": "model",
"created": 1234567890,
"owned_by": "OWNER",
}
]
}
)
@pytest.fixture()
def mock_v1_ranking(respx_mock: respx.MockRouter) -> None:
respx_mock.post(
re.compile(r"https://ai\.api\.nvidia\.com/v1/.*/reranking")
).respond(
json={
"rankings": [
{"index": 0, "logit": 4.2},
]
}
)
@pytest.fixture()
def mock(mock_v1_models: None, mock_v1_ranking: None) -> None:
pass
@pytest.mark.parametrize(
"truncate",
[
None,
"END",
"NONE",
],
)
def test_truncate_passed(
mock: None,
respx_mock: respx.MockRouter,
truncate: Optional[Literal["END", "NONE"]],
) -> None:
client = NVIDIARerank(
api_key="BOGUS",
**({"truncate": truncate} if truncate else {}),
)
response = client.postprocess_nodes(
[NodeWithScore(node=Document(text="Nothing really."))],
query_str="What is it?",
)
assert len(response) == 1
assert len(respx.calls) > 0
last_call = list(respx.calls)[-1]
request_payload = json.loads(last_call.request.content.decode("utf-8"))
if truncate is None:
assert "truncate" not in request_payload
else:
assert "truncate" in request_payload
assert request_payload["truncate"] == truncate
@pytest.mark.parametrize("truncate", [True, False, 1, 0, 1.0, "START", "BOGUS"])
def test_truncate_invalid(truncate: Any) -> None:
with pytest.raises(ValueError):
NVIDIARerank(truncate=truncate)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", ["END"])
def test_truncate_positive(model: str, mode: dict, truncate: str) -> None:
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(model=model, top_n=len(nodes), truncate=truncate, **mode)
response = client.postprocess_nodes(nodes, query_str=query)
print(response)
assert len(response) == len(nodes)
@pytest.mark.integration()
@pytest.mark.parametrize("truncate", [None, "NONE"])
def test_truncate_negative(model: str, mode: dict, truncate: str) -> None:
if model == "nv-rerank-qa-mistral-4b:1":
pytest.skip(
"truncation is inconsistent across models, "
"nv-rerank-qa-mistral-4b:1 truncates by default "
"while others do not"
)
query = "What is acceleration?"
nodes = [
NodeWithScore(node=Document(text="NVIDIA " * length))
for length in [32, 1024, 64, 128, 2048, 256, 512]
]
client = NVIDIARerank(
model=model, **mode, **({"truncate": truncate} if truncate else {})
)
with pytest.raises(Exception) as e:
client.postprocess_nodes(nodes, query_str=query)
assert "400" in str(e.value)
# assert "exceeds maximum allowed" in str(e.value)
|
"""Test IPEX LLM"""
import os
import pytest
from langchain_community.embeddings import IpexLLMBgeEmbeddings
model_ids_to_test = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test,
reason="TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS environment variable not set.",
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore[assignment]
device = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_DEVICE") or "cpu"
sentence = "IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., \
local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency."
query = "What is IPEX-LLM?"
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_embed_documents(model_id: str) -> None:
"""Test IpexLLMBgeEmbeddings embed_documents"""
embedding_model = IpexLLMBgeEmbeddings(
model_name=model_id,
model_kwargs={"device": device},
encode_kwargs={"normalize_embeddings": True},
)
output = embedding_model.embed_documents([sentence, query])
assert len(output) == 2
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_embed_query(model_id: str) -> None:
"""Test IpexLLMBgeEmbeddings embed_documents"""
embedding_model = IpexLLMBgeEmbeddings(
model_name=model_id,
model_kwargs={"device": device},
encode_kwargs={"normalize_embeddings": True},
)
output = embedding_model.embed_query(query)
assert isinstance(output, list)
|
"""Test IPEX LLM"""
import os
import pytest
from langchain_community.embeddings import IpexLLMBgeEmbeddings
model_ids_to_test = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS") or ""
skip_if_no_model_ids = pytest.mark.skipif(
not model_ids_to_test,
reason="TEST_IPEXLLM_BGE_EMBEDDING_MODEL_IDS environment variable not set.",
)
model_ids_to_test = [model_id.strip() for model_id in model_ids_to_test.split(",")] # type: ignore
device = os.getenv("TEST_IPEXLLM_BGE_EMBEDDING_MODEL_DEVICE") or "cpu"
sentence = "IPEX-LLM is a PyTorch library for running LLM on Intel CPU and GPU (e.g., \
local PC with iGPU, discrete GPU such as Arc, Flex and Max) with very low latency."
query = "What is IPEX-LLM?"
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_embed_documents(model_id: str) -> None:
"""Test IpexLLMBgeEmbeddings embed_documents"""
embedding_model = IpexLLMBgeEmbeddings(
model_name=model_id,
model_kwargs={"device": device},
encode_kwargs={"normalize_embeddings": True},
)
output = embedding_model.embed_documents([sentence, query])
assert len(output) == 2
@skip_if_no_model_ids
@pytest.mark.parametrize(
"model_id",
model_ids_to_test,
)
def test_embed_query(model_id: str) -> None:
"""Test IpexLLMBgeEmbeddings embed_documents"""
embedding_model = IpexLLMBgeEmbeddings(
model_name=model_id,
model_kwargs={"device": device},
encode_kwargs={"normalize_embeddings": True},
)
output = embedding_model.embed_query(query)
assert isinstance(output, list)
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrayWeaviate objects are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(
self._map_id(x), class_name=self._class_name
)
elif isinstance(x, Document):
return self._client.data_object.exists(
self._map_id(x.id), class_name=self._class_name
)
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(
batch_size=self._config.batch_size, dynamic=self._config.dynamic_batching
) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
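# Behavior sketch (illustrative; `da` stands for a DocumentArray backed by weaviate):
#
#     doc = Document(text='hello')
#     da.extend([doc])
#     assert len(da) == 1                  # __len__ asks weaviate for the meta count
#     assert doc in da and doc.id in da    # __contains__ accepts Documents or ids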
|
from typing import Union, Iterable
from docarray.array.storage.base.seqlike import BaseSequenceLikeMixin
from docarray.array.storage.registry import _REGISTRY
from docarray import Document
class SequenceLikeMixin(BaseSequenceLikeMixin):
"""Implement sequence-like methods for DocumentArray with weaviate as storage"""
def __eq__(self, other):
"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""
        # two DocumentArrayWeaviate objects are considered the same if they have the same client meta data
return (
type(self) is type(other)
and self._client.get_meta() == other._client.get_meta()
and self._config == other._config
)
def __len__(self):
"""Return the length of :class:`DocumentArray` that uses weaviate as storage
:return: the length of this :class:`DocumentArrayWeaviate` object
"""
cls_data = (
self._client.query.aggregate(self._class_name)
.with_meta_count()
.do()
.get('data', {})
.get('Aggregate', {})
.get(self._class_name, [])
)
if not cls_data:
return 0
return cls_data[0]['meta']['count']
def __contains__(self, x: Union[str, 'Document']):
"""Check if ``x`` is contained in this :class:`DocumentArray` with weaviate storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""
if isinstance(x, str):
return self._client.data_object.exists(self._map_id(x))
elif isinstance(x, Document):
return self._client.data_object.exists(self._map_id(x.id))
else:
return False
def __repr__(self):
"""Return the string representation of :class:`DocumentArrayWeaviate` object
:return: string representation of this object
"""
return f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'
def _extend(self, values: Iterable['Document'], **kwargs) -> None:
"""Extends the array with the given values
:param values: Documents to be added
"""
with self._client.batch(batch_size=50) as _b:
for d in values:
_b.add_data_object(**self._doc2weaviate_create_payload(d))
self._offset2ids.append(d.id)
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
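# Usage sketch (illustrative; the file and its contents are hypothetical):
#
#     # agent.json needs at least {"_type": ..., "llm_chain": {...}} to round-trip
#     agent = load_agent("agent.json")
#     # or, when the config sets load_from_llm_and_tools, rebuild from components:
#     agent = load_agent_from_config(config, llm=llm, tools=tools)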
|
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain.agents.agent import BaseMultiActionAgent, BaseSingleActionAgent
from langchain.agents.types import AGENT_TO_CLASS
from langchain.chains.loading import load_chain, load_chain_from_config
logger = logging.getLogger(__file__)
URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/agents/"
def _load_agent_from_tools(
config: dict, llm: BaseLanguageModel, tools: list[Tool], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
combined_config = {**config, **kwargs}
return agent_cls.from_llm_and_tools(llm, tools, **combined_config)
@deprecated("0.1.0", removal="1.0")
def load_agent_from_config(
config: dict,
llm: Optional[BaseLanguageModel] = None,
tools: Optional[list[Tool]] = None,
**kwargs: Any,
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from Config Dict.
Args:
config: Config dict to load agent from.
llm: Language model to use as the agent.
tools: List of tools this agent has access to.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
ValueError: If agent type is not specified in the config.
"""
if "_type" not in config:
raise ValueError("Must specify an agent Type in config")
load_from_tools = config.pop("load_from_llm_and_tools", False)
if load_from_tools:
if llm is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, then LLM must be provided"
)
if tools is None:
raise ValueError(
"If `load_from_llm_and_tools` is set to True, "
"then tools must be provided"
)
return _load_agent_from_tools(config, llm, tools, **kwargs)
config_type = config.pop("_type")
if config_type not in AGENT_TO_CLASS:
raise ValueError(f"Loading {config_type} agent not supported")
agent_cls = AGENT_TO_CLASS[config_type]
if "llm_chain" in config:
config["llm_chain"] = load_chain_from_config(config.pop("llm_chain"))
elif "llm_chain_path" in config:
config["llm_chain"] = load_chain(config.pop("llm_chain_path"))
else:
raise ValueError("One of `llm_chain` and `llm_chain_path` should be specified.")
if "output_parser" in config:
logger.warning(
"Currently loading output parsers on agent is not supported, "
"will just use the default one."
)
del config["output_parser"]
combined_config = {**config, **kwargs}
return agent_cls(**combined_config) # type: ignore
@deprecated("0.1.0", removal="1.0")
def load_agent(
path: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Unified method for loading an agent from LangChainHub or local fs.
Args:
path: Path to the agent file.
kwargs: Additional keyword arguments passed to the agent executor.
Returns:
An agent executor.
Raises:
RuntimeError: If loading from the deprecated github-based
Hub is attempted.
"""
if isinstance(path, str) and path.startswith("lc://"):
raise RuntimeError(
"Loading from the deprecated github-based Hub is no longer supported. "
"Please use the new LangChain Hub at https://smith.langchain.com/hub "
"instead."
)
return _load_agent_from_file(path, **kwargs)
def _load_agent_from_file(
file: Union[str, Path], **kwargs: Any
) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:
"""Load agent from file."""
valid_suffixes = {"json", "yaml"}
# Convert file to Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix[1:] == "json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix[1:] == "yaml":
with open(file_path) as f:
config = yaml.safe_load(f)
else:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Load the agent from the config now.
return load_agent_from_config(config, **kwargs)
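# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of the file-based entry point above. The file name
# and config contents are assumptions; valid "_type" values come from
# AGENT_TO_CLASS, and the "llm_chain" payload must be a loadable chain config.
#
#     import json, tempfile
#     cfg = {"_type": "zero-shot-react-description",  # hypothetical agent type
#            "llm_chain": {...}}                      # elided chain config
#     with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
#         json.dump(cfg, f)
#     agent = load_agent(f.name)  # dispatches on suffix, then builds the agent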
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
return LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
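# --- Illustration (added; a minimal sketch, not part of the original module) ---
# How a Pydantic schema becomes the OpenAI function payload built above.
# Assumes pydantic v2 (`model_json_schema`); guarded so importing stays
# side-effect free.
if __name__ == "__main__":
    demo_schema = AnswerWithSources.model_json_schema()
    demo_function = {
        "name": demo_schema["title"],
        "description": demo_schema["description"],
        "parameters": demo_schema,
    }
    print(demo_function["name"])  # -> AnswerWithSources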
|
from typing import Any, Optional, Union, cast
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.output_parsers import BaseLLMOutputParser
from langchain_core.output_parsers.openai_functions import (
OutputFunctionsParser,
PydanticOutputFunctionsParser,
)
from langchain_core.prompts import PromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.utils.pydantic import is_basemodel_subclass
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.chains.openai_functions.utils import get_llm_kwargs
class AnswerWithSources(BaseModel):
"""An answer to the question, with sources."""
answer: str = Field(..., description="Answer to the question that was asked")
sources: list[str] = Field(
..., description="List of sources used to answer the question"
)
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with structured responses: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_structure_chain(
llm: BaseLanguageModel,
schema: Union[dict, type[BaseModel]],
output_parser: str = "base",
prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
verbose: bool = False,
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources
based on schema.
Args:
llm: Language model to use for the chain.
schema: Pydantic schema to use for the output.
output_parser: Output parser to use. Should be one of `pydantic` or `base`.
Default to `base`.
prompt: Optional prompt to use for the chain.
Returns:
"""
if output_parser == "pydantic":
if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
msg = (
"Must provide a pydantic class for schema when output_parser is "
"'pydantic'."
)
raise ValueError(msg)
_output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
pydantic_schema=schema
)
elif output_parser == "base":
_output_parser = OutputFunctionsParser()
else:
msg = (
f"Got unexpected output_parser: {output_parser}. "
f"Should be one of `pydantic` or `base`."
)
raise ValueError(msg)
if isinstance(schema, type) and is_basemodel_subclass(schema):
if hasattr(schema, "model_json_schema"):
schema_dict = cast(dict, schema.model_json_schema())
else:
schema_dict = cast(dict, schema.schema())
else:
schema_dict = cast(dict, schema)
function = {
"name": schema_dict["title"],
"description": schema_dict["description"],
"parameters": schema_dict,
}
llm_kwargs = get_llm_kwargs(function)
messages = [
SystemMessage(
content=(
"You are a world class algorithm to answer "
"questions in a specific format."
)
),
HumanMessage(content="Answer question using the following context"),
HumanMessagePromptTemplate.from_template("{context}"),
HumanMessagePromptTemplate.from_template("Question: {question}"),
HumanMessage(content="Tips: Make sure to answer in the correct format"),
]
prompt = prompt or ChatPromptTemplate(messages=messages) # type: ignore[arg-type]
chain = LLMChain(
llm=llm,
prompt=prompt,
llm_kwargs=llm_kwargs,
output_parser=_output_parser,
verbose=verbose,
)
return chain
@deprecated(
since="0.2.13",
removal="1.0",
message=(
"This function is deprecated. Refer to this guide on retrieval and question "
"answering with sources: "
"https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response" # noqa: E501
),
)
def create_qa_with_sources_chain(
llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
) -> LLMChain:
"""Create a question answering chain that returns an answer with sources.
Args:
llm: Language model to use for the chain.
verbose: Whether to print the details of the chain
**kwargs: Keyword arguments to pass to `create_qa_with_structure_chain`.
Returns:
Chain (LLMChain) that can be used to answer questions with citations.
"""
return create_qa_with_structure_chain(
llm, AnswerWithSources, verbose=verbose, **kwargs
)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.registry import get_parser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_parser": "langchain_community.document_loaders.parsers.registry",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"get_parser",
]
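# --- Usage note (added; import path is an assumption) ---
# With the PEP 562 module-level __getattr__ above, accessing the deprecated
# name lazily resolves through langchain_community and emits a warning:
#
#     from langchain.document_loaders.parsers import get_parser  # assumed path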
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders.parsers.registry import get_parser
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"get_parser": "langchain_community.document_loaders.parsers.registry"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"get_parser",
]
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
TODOIST = "todoist"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
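# --- Illustration (added; a minimal sketch) ---
# Because ProviderName subclasses str, members compare equal to their raw
# string values and round-trip from those values; guarded demo:
if __name__ == "__main__":
    assert ProviderName.OPENAI == "openai"
    assert ProviderName("github") is ProviderName.GITHUB
    print([p.value for p in ProviderName][:3])  # -> ['anthropic', 'compass', 'discord']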
|
from enum import Enum
# --8<-- [start:ProviderName]
class ProviderName(str, Enum):
ANTHROPIC = "anthropic"
COMPASS = "compass"
DISCORD = "discord"
D_ID = "d_id"
E2B = "e2b"
EXA = "exa"
FAL = "fal"
GITHUB = "github"
GOOGLE = "google"
GOOGLE_MAPS = "google_maps"
GROQ = "groq"
HUBSPOT = "hubspot"
IDEOGRAM = "ideogram"
JINA = "jina"
LINEAR = "linear"
MEDIUM = "medium"
MEM0 = "mem0"
NOTION = "notion"
NVIDIA = "nvidia"
OLLAMA = "ollama"
OPENAI = "openai"
OPENWEATHERMAP = "openweathermap"
OPEN_ROUTER = "open_router"
PINECONE = "pinecone"
REDDIT = "reddit"
REPLICATE = "replicate"
REVID = "revid"
SLANT3D = "slant3d"
SMTP = "smtp"
TWITTER = "twitter"
UNREAL_SPEECH = "unreal_speech"
# --8<-- [end:ProviderName]
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
# https://universe.roboflow.com/roboflow-100/people-in-paintings/dataset/2
data_root = 'data/people_in_painting_v2/'
class_name = ('Human', )
palette = [(220, 20, 60)]
metainfo = dict(classes=class_name, palette=palette)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=10,
dataset=dict(
type='CocoDataset',
data_root=data_root,
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline,
return_classes=True,
data_prefix=dict(img='train/'),
ann_file='train/_annotations.coco.json')))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
return_classes=True,
ann_file='valid/_annotations.coco.json',
data_prefix=dict(img='valid/')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'valid/_annotations.coco.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1)
}))
# learning policy
max_epochs = 5
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[4],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = 'https://download.openmmlab.com/mmdetection/v3.0/mm_grounding_dino/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det/grounding_dino_swin-t_pretrain_obj365_goldg_grit9m_v3det_20231204_095047-b448804b.pth' # noqa
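# --- Schedule note (added for illustration) ---
# With max_epochs=5, milestones=[4] and gamma=0.1, AdamW runs at lr=1e-4 for
# epochs 1-4 and 1e-5 afterwards; backbone params (lr_mult=0.1) see 1e-5,
# then 1e-6.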
|
_base_ = '../grounding_dino_swin-t_pretrain_obj365.py'
# https://universe.roboflow.com/roboflow-100/people-in-paintings/dataset/2
data_root = 'data/people_in_painting_v2/'
class_name = ('Human', )
palette = [(220, 20, 60)]
metainfo = dict(classes=class_name, palette=palette)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='RandomFlip', prob=0.5),
dict(
type='RandomChoice',
transforms=[
[
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
],
[
dict(
type='RandomChoiceResize',
                    # The ratio of all images in the train dataset is < 7,
                    # following the original implementation
scales=[(400, 4200), (500, 4200), (600, 4200)],
keep_ratio=True),
dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(384, 600),
allow_negative_crop=True),
dict(
type='RandomChoiceResize',
scales=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
(608, 1333), (640, 1333), (672, 1333), (704, 1333),
(736, 1333), (768, 1333), (800, 1333)],
keep_ratio=True)
]
]),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor', 'flip', 'flip_direction', 'text',
'custom_entities'))
]
train_dataloader = dict(
sampler=dict(_delete_=True, type='DefaultSampler', shuffle=True),
batch_sampler=dict(type='AspectRatioBatchSampler'),
dataset=dict(
_delete_=True,
type='RepeatDataset',
times=10,
dataset=dict(
type='CocoDataset',
data_root=data_root,
metainfo=metainfo,
filter_cfg=dict(filter_empty_gt=False, min_size=32),
pipeline=train_pipeline,
return_classes=True,
data_prefix=dict(img='train/'),
ann_file='train/_annotations.coco.json')))
val_dataloader = dict(
dataset=dict(
metainfo=metainfo,
data_root=data_root,
return_classes=True,
ann_file='valid/_annotations.coco.json',
data_prefix=dict(img='valid/')))
test_dataloader = val_dataloader
val_evaluator = dict(
type='CocoMetric',
ann_file=data_root + 'valid/_annotations.coco.json',
metric='bbox',
format_only=False)
test_evaluator = val_evaluator
optim_wrapper = dict(
_delete_=True,
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.0001, weight_decay=0.0001),
clip_grad=dict(max_norm=0.1, norm_type=2),
paramwise_cfg=dict(custom_keys={
'absolute_pos_embed': dict(decay_mult=0.),
'backbone': dict(lr_mult=0.1)
}))
# learning policy
max_epochs = 5
param_scheduler = [
dict(
type='MultiStepLR',
begin=0,
end=max_epochs,
by_epoch=True,
milestones=[4],
gamma=0.1)
]
train_cfg = dict(max_epochs=max_epochs, val_interval=1)
default_hooks = dict(checkpoint=dict(max_keep_ckpts=1, save_best='auto'))
load_from = ''
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmdet.registry import TASK_UTILS
BBOX_ASSIGNERS = TASK_UTILS
BBOX_SAMPLERS = TASK_UTILS
BBOX_CODERS = TASK_UTILS
def build_assigner(cfg, **default_args):
"""Builder of box assigner."""
    warnings.warn('``build_assigner`` will be deprecated soon; please use '
                  '``mmdet.registry.TASK_UTILS.build()`` instead')
return TASK_UTILS.build(cfg, default_args=default_args)
def build_sampler(cfg, **default_args):
"""Builder of box sampler."""
    warnings.warn('``build_sampler`` will be deprecated soon; please use '
                  '``mmdet.registry.TASK_UTILS.build()`` instead')
return TASK_UTILS.build(cfg, default_args=default_args)
def build_bbox_coder(cfg, **default_args):
"""Builder of box coder."""
    warnings.warn('``build_bbox_coder`` will be deprecated soon; please use '
                  '``mmdet.registry.TASK_UTILS.build()`` instead')
return TASK_UTILS.build(cfg, default_args=default_args)
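# --- Usage sketch (added; config values are assumptions) ---
# Both the legacy helper and the registry route accept the same cfg dict, e.g.:
#
#     cfg = dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4,
#                min_pos_iou=0)
#     assigner = build_assigner(cfg)    # legacy path, emits the warning above
#     assigner = TASK_UTILS.build(cfg)  # preferred path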
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg
BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')
def build_assigner(cfg, **default_args):
"""Builder of box assigner."""
return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
def build_sampler(cfg, **default_args):
"""Builder of box sampler."""
return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
def build_bbox_coder(cfg, **default_args):
"""Builder of box coder."""
return build_from_cfg(cfg, BBOX_CODERS, default_args)
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required; no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
return AudioMetaData(
int(sinfo[8]),
sinfo[5],
sinfo[9],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required; no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[8])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
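# --- Illustration (added; a minimal sketch) ---
# _get_load_filter is a pure function; a few representative outputs, guarded
# so they never run on import:
if __name__ == "__main__":
    assert _get_load_filter() == "aformat=sample_fmts=fltp"
    assert _get_load_filter(convert=False) is None
    assert (
        _get_load_filter(frame_offset=8000, num_frames=16000, convert=False)
        == "atrim=start_sample=8000:end_sample=24000"
    )
    assert _get_load_filter(frame_offset=8000, num_frames=16000) == (
        "atrim=start_sample=8000:end_sample=24000,aformat=sample_fmts=fltp"
    )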
|
from typing import Dict, Optional, Tuple
import torch
import torchaudio
from torchaudio.backend.common import AudioMetaData
# Note: must comply with TorchScript syntax -- annotations required; no f-strings or globals
def _info_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
):
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
return AudioMetaData(
int(sinfo[7]),
sinfo[5],
sinfo[8],
sinfo[6],
sinfo[1].upper(),
)
def info_audio(
src: str,
format: Optional[str],
) -> AudioMetaData:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _info_audio(s)
def info_audio_fileobj(
src,
format: Optional[str],
) -> AudioMetaData:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _info_audio(s)
def _get_load_filter(
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
) -> Optional[str]:
if frame_offset < 0:
raise RuntimeError("Invalid argument: frame_offset must be non-negative. Found: {}".format(frame_offset))
if num_frames == 0 or num_frames < -1:
raise RuntimeError("Invalid argument: num_frames must be -1 or greater than 0. Found: {}".format(num_frames))
# All default values -> no filter
if frame_offset == 0 and num_frames == -1 and not convert:
return None
# Only convert
aformat = "aformat=sample_fmts=fltp"
if frame_offset == 0 and num_frames == -1 and convert:
return aformat
# At least one of frame_offset or num_frames has non-default value
if num_frames > 0:
atrim = "atrim=start_sample={}:end_sample={}".format(frame_offset, frame_offset + num_frames)
else:
atrim = "atrim=start_sample={}".format(frame_offset)
if not convert:
return atrim
return "{},{}".format(atrim, aformat)
# Note: must comply with TorchScript syntax -- annotations required; no f-strings or globals
def _load_audio(
s: torch.classes.torchaudio.ffmpeg_StreamReader,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
) -> Tuple[torch.Tensor, int]:
i = s.find_best_audio_stream()
sinfo = s.get_src_stream_info(i)
sample_rate = int(sinfo[7])
option: Dict[str, str] = {}
s.add_audio_stream(i, -1, -1, _get_load_filter(frame_offset, num_frames, convert), None, option)
s.process_all_packets()
waveform = s.pop_chunks()[0]
if waveform is None:
raise RuntimeError("Failed to decode audio.")
assert waveform is not None
if channels_first:
waveform = waveform.T
return waveform, sample_rate
def load_audio(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torch.classes.torchaudio.ffmpeg_StreamReader(src, format, None)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
def load_audio_fileobj(
src: str,
frame_offset: int = 0,
num_frames: int = -1,
convert: bool = True,
channels_first: bool = True,
format: Optional[str] = None,
) -> Tuple[torch.Tensor, int]:
s = torchaudio._torchaudio_ffmpeg.StreamReaderFileObj(src, format, None, 4096)
return _load_audio(s, frame_offset, num_frames, convert, channels_first)
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a PIL Image or ndarray to tensor and scale the values accordingly.
.. betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8.
    In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
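# --- Illustration (added; a minimal sketch) ---
# The deprecated transform converts an HWC uint8 array to a scaled CHW float
# tensor (instantiating it emits the deprecation warning above):
if __name__ == "__main__":
    img = np.random.randint(0, 256, size=(4, 5, 3), dtype=np.uint8)
    out = ToTensor()(img)
    assert out.shape == (3, 4, 5) and out.dtype == torch.float32
    assert 0.0 <= float(out.min()) and float(out.max()) <= 1.0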
|
import warnings
from typing import Any, Dict, Union
import numpy as np
import PIL.Image
import torch
from torchvision.transforms import functional as _F
from torchvision.transforms.v2 import Transform
class ToTensor(Transform):
"""[BETA] Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
.. betastatus:: ToTensor transform
.. warning::
:class:`v2.ToTensor` is deprecated and will be removed in a future release.
Please use instead ``transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])``.
This transform does not support torchscript.
Converts a PIL Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8.
    In the other cases, tensors are returned without scaling.
.. note::
Because the input image is scaled to [0.0, 1.0], this transformation should not be used when
transforming target image masks. See the `references`_ for implementing the transforms for image masks.
.. _references: https://github.com/pytorch/vision/tree/main/references/segmentation
"""
_transformed_types = (PIL.Image.Image, np.ndarray)
def __init__(self) -> None:
warnings.warn(
"The transform `ToTensor()` is deprecated and will be removed in a future release. "
"Instead, please use `transforms.Compose([transforms.ToImageTensor(), transforms.ConvertImageDtype()])`."
)
super().__init__()
def _transform(self, inpt: Union[PIL.Image.Image, np.ndarray], params: Dict[str, Any]) -> torch.Tensor:
return _F.to_tensor(inpt)
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import NdArray, PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples)
assert isinstance(point_cloud, np.ndarray)
assert isinstance(point_cloud, NdArray)
assert point_cloud.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(point_cloud, np.ndarray)
assert len(point_cloud.shape) == 3
assert point_cloud.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
import numpy as np
import pytest
from pydantic.tools import parse_obj_as, schema_json_of
from docarray.base_document.io.json import orjson_dumps
from docarray.typing import PointCloud3DUrl
from tests import TOYDATA_DIR
MESH_FILES = {
'obj': str(TOYDATA_DIR / 'tetrahedron.obj'),
'glb': str(TOYDATA_DIR / 'test.glb'),
'ply': str(TOYDATA_DIR / 'cube.ply'),
}
REMOTE_OBJ_FILE = 'https://people.sc.fsu.edu/~jburkardt/data/obj/al.obj'
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples)
assert isinstance(point_cloud, np.ndarray)
assert point_cloud.shape == (n_samples, 3)
@pytest.mark.slow
@pytest.mark.internet
@pytest.mark.parametrize(
'file_format, file_path',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('remote-obj', REMOTE_OBJ_FILE),
],
)
def test_load_with_multiple_geometries_true(file_format, file_path):
n_samples = 100
url = parse_obj_as(PointCloud3DUrl, file_path)
point_cloud = url.load(samples=n_samples, multiple_geometries=True)
assert isinstance(point_cloud, np.ndarray)
assert len(point_cloud.shape) == 3
assert point_cloud.shape[1:] == (100, 3)
def test_json_schema():
schema_json_of(PointCloud3DUrl)
def test_dump_json():
url = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
orjson_dumps(url)
@pytest.mark.parametrize(
'file_format,path_to_file',
[
('obj', MESH_FILES['obj']),
('glb', MESH_FILES['glb']),
('ply', MESH_FILES['ply']),
('obj', REMOTE_OBJ_FILE),
('illegal', 'illegal'),
('illegal', 'https://www.google.com'),
('illegal', 'my/local/text/file.txt'),
('illegal', 'my/local/text/file.png'),
],
)
def test_validation(file_format, path_to_file):
if file_format == 'illegal':
with pytest.raises(ValueError, match='PointCloud3DUrl'):
parse_obj_as(PointCloud3DUrl, path_to_file)
else:
url = parse_obj_as(PointCloud3DUrl, path_to_file)
assert isinstance(url, PointCloud3DUrl)
assert isinstance(url, str)
def test_proto_point_cloud_url():
uri = parse_obj_as(PointCloud3DUrl, REMOTE_OBJ_FILE)
uri._to_node_protobuf()
|
_base_ = './vfnet_r50_fpn_ms-2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
backbone=dict(
type='Res2Net',
depth=101,
scales=4,
base_width=26,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
NUM_BYTES_FOR_HEADER_LEN = 8
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
    # This mirrors the logic used in the HF codebase
    # https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
    # and follows their documentation on how the files are serialized:
    # https://huggingface.co/docs/safetensors/index#format
header_len_bytes = file_bytes.read(NUM_BYTES_FOR_HEADER_LEN)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + NUM_BYTES_FOR_HEADER_LEN)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
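# --- Illustration (added; a minimal sketch) ---
# Build a tiny in-memory header in the safetensors layout (8-byte little-endian
# length prefix + JSON) and parse it back with the helper above; guarded demo:
if __name__ == "__main__":
    demo_header = json.dumps(
        {DEFAULT_EXTRA_METADATA_KEY: {FORMAT_KEY: FORMAT_VALUE}}
    ).encode()
    demo_blob = io.BytesIO(struct.pack("<Q", len(demo_header)) + demo_header)
    demo_meta, demo_offset = _get_safetensors_file_metadata(demo_blob)
    assert demo_meta[DEFAULT_EXTRA_METADATA_KEY][FORMAT_KEY] == "pt"
    assert demo_offset == NUM_BYTES_FOR_HEADER_LEN + len(demo_header)
    assert _gen_file_name(1, 3) == "model-00001-of-00003.safetensors"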
|
import io
import json
import struct
from dataclasses import dataclass
from typing import Any, Optional
import torch
_metadata_fn: str = "model.safetensors.index.json"
FILE_NAME = "model-{cpt_idx}-of-{num_files}"
SHARDED_FILE_NAME = "shard-{shard_idx}-model-{cpt_idx}-of-{num_files}"
SUFFIX = ".safetensors"
# metadata keys
CUSTOM_METADATA_KEY = "DCP_SHARDING_INFO"
DEFAULT_EXTRA_METADATA_KEY = "__metadata__"
SAVED_OFFSETS_KEY = "saved_offsets"
SHAPE_KEY = "shape"
DATA_KEY = "data"
DTYPE_KEY = "dtype"
DATA_OFFSETS_KEY = "data_offsets"
DTYPE_MAP = {
"F16": torch.float16,
"F32": torch.float32,
"F64": torch.float64,
"I8": torch.int8,
"U8": torch.uint8,
"I16": torch.int16,
"I32": torch.int32,
"I64": torch.int64,
"BF16": torch.bfloat16,
}
HF_DCP_VERSION: float = 1.0
DCP_VERSION_KEY = "DCP_VERSION"
DCP_SHARDING_INFO_KEY = "DCP_SHARDING_INFO"
FORMAT_KEY = "format"
FORMAT_VALUE = "pt"
@dataclass
class _HFStorageInfo:
"""This is the per entry storage info."""
relative_path: str
offset: int
length: int
shape: torch.Size
dtype: torch.dtype
def _gen_file_name(
index: int, largest_index: int, shard_index: Optional[int] = None
) -> str:
if shard_index is not None:
return (
SHARDED_FILE_NAME.format(
shard_idx=f"{shard_index}".zfill(5),
cpt_idx=f"{index}".zfill(5),
num_files=f"{largest_index}".zfill(5),
)
+ SUFFIX
)
else:
return (
FILE_NAME.format(
cpt_idx=f"{index}".zfill(5), num_files=f"{largest_index}".zfill(5)
)
+ SUFFIX
)
def _get_safetensors_file_metadata(file_bytes: io.IOBase) -> tuple[Any, int]:
    # This mirrors the logic used in the HF codebase
    # https://github.com/2404589803/huggingface_hub/blob/main/src/huggingface_hub/hf_api.py#L5308
    # and follows their documentation on how the files are serialized:
    # https://huggingface.co/docs/safetensors/index#format
num_bytes_for_header_len = 8
header_len_bytes = file_bytes.read(num_bytes_for_header_len)
header_len = struct.unpack("<Q", header_len_bytes)[0]
header_json = file_bytes.read(header_len)
metadata = json.loads(header_json)
return (metadata, header_len + num_bytes_for_header_len)
def _get_dtype(dtype_str: str) -> torch.dtype:
try:
dtype = DTYPE_MAP[dtype_str]
except KeyError:
dtype = torch.get_default_dtype()
return dtype
def _get_dcp_custom_metadata(metadata: Any) -> Optional[Any]:
if DEFAULT_EXTRA_METADATA_KEY in metadata:
custom_metadata = metadata[DEFAULT_EXTRA_METADATA_KEY]
if CUSTOM_METADATA_KEY in custom_metadata:
return json.loads(custom_metadata[CUSTOM_METADATA_KEY])
return None
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
image_uri: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(["-e", "NCCL_RAS_ENABLE=0"])
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(image_uri)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
image_uri=args.image_uri,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --image-uri IMAGE_URI [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--image-uri",
type=str,
required=True,
help=(
"Fully qualified image URI to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
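# --- Example invocation (added; script path, image URI, and command are hypothetical) ---
#   python ops/docker_run.py --image-uri xgb-ci.gpu:main --use-gpus \
#       --run-args '--cap-add SYS_PTRACE --shm-size=4g' -- pytest -v ./tests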
|
"""
Wrapper script to run a command inside a Docker container
"""
import argparse
import grp
import itertools
import os
import pathlib
import pwd
import subprocess
import sys
import textwrap
OPS_DIR = pathlib.Path(__file__).expanduser().resolve().parent
PROJECT_ROOT_DIR = OPS_DIR.parent
LINEWIDTH = 88
TEXT_WRAPPER = textwrap.TextWrapper(
width=LINEWIDTH,
initial_indent="",
subsequent_indent=" ",
break_long_words=False,
break_on_hyphens=False,
)
def parse_run_args(*, raw_run_args: str) -> list[str]:
return [x for x in raw_run_args.split() if x]
def get_user_ids() -> dict[str, str]:
uid = os.getuid()
gid = os.getgid()
return {
"CI_BUILD_UID": str(uid),
"CI_BUILD_USER": pwd.getpwuid(uid).pw_name,
"CI_BUILD_GID": str(gid),
"CI_BUILD_GROUP": grp.getgrgid(gid).gr_name,
}
def fancy_print_cli_args(*, cli_args: list[str]) -> None:
print(
"=" * LINEWIDTH
+ "\n"
+ " \\\n".join(TEXT_WRAPPER.wrap(" ".join(cli_args)))
+ "\n"
+ "=" * LINEWIDTH
+ "\n",
flush=True,
)
def docker_run(
*,
image_uri: str,
command_args: list[str],
use_gpus: bool,
workdir: pathlib.Path,
user_ids: dict[str, str],
extra_args: list[str],
) -> None:
# Command-line arguments to be passed to `docker run`
docker_run_cli_args = ["--rm", "--pid=host"]
if use_gpus:
docker_run_cli_args.extend(["--gpus", "all"])
docker_run_cli_args.extend(["-v", f"{workdir}:/workspace", "-w", "/workspace"])
docker_run_cli_args.extend(
itertools.chain.from_iterable([["-e", f"{k}={v}"] for k, v in user_ids.items()])
)
docker_run_cli_args.extend(extra_args)
docker_run_cli_args.append(image_uri)
docker_run_cli_args.extend(command_args)
cli_args = ["docker", "run"] + docker_run_cli_args
fancy_print_cli_args(cli_args=cli_args)
subprocess.run(cli_args, check=True, encoding="utf-8")
def main(*, args: argparse.Namespace) -> None:
run_args = parse_run_args(raw_run_args=args.run_args)
user_ids = get_user_ids()
if args.use_gpus:
print("Using NVIDIA GPUs for `docker run`")
if args.interactive:
print("Using interactive mode for `docker run`")
run_args.append("-it")
docker_run(
image_uri=args.image_uri,
command_args=args.command_args,
use_gpus=args.use_gpus,
workdir=args.workdir,
user_ids=user_ids,
extra_args=run_args,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
usage=(
f"{sys.argv[0]} --image-uri IMAGE_URI [--use-gpus] [--interactive] "
"[--workdir WORKDIR] [--run-args RUN_ARGS] -- COMMAND_ARG "
"[COMMAND_ARG ...]"
),
description="Run tasks inside a Docker container",
)
parser.add_argument(
"--image-uri",
type=str,
required=True,
help=(
"Fully qualified image URI to identify the container, e.g. "
"492475357299.dkr.ecr.us-west-2.amazonaws.com/xgb-ci.gpu:main"
),
)
parser.add_argument(
"--use-gpus",
action="store_true",
help=(
"Grant the container access to NVIDIA GPUs; requires the NVIDIA "
"Container Toolkit."
),
)
parser.add_argument(
"--interactive",
action="store_true",
help=(
"Run the container in the interactive mode; requires an interactive shell "
"(TTY). With this flag, you can use Ctrl-C to interrupt an long-running "
"command."
),
)
parser.add_argument(
"--workdir",
type=lambda p: pathlib.Path(p).expanduser().resolve(),
default=PROJECT_ROOT_DIR,
help="Path to working directory; if unset, use the project's root",
)
parser.add_argument(
"--run-args",
type=str,
default="",
help=(
"Argument(s) to be passed to `docker run`. When passing multiple "
"arguments, use single quotes to wrap them. Example: "
"--run-args '--cap-add SYS_PTRACE --shm-size=4g'"
),
)
parser.add_argument(
"command_args",
metavar="COMMAND_ARG",
type=str,
nargs="+",
help=(
"Argument(s) for the command to execute. NOTE. Make sure to specify "
"double-dash (--) to clearly distinguish between the command and the "
"preceding parameters. Example: --run-args '--cap-add SYS_PTRACE "
"--shm-size=4g' -- ./myprog"
),
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
parsed_args = parser.parse_args()
main(args=parsed_args)
|
"""Init file of LlamaIndex."""
__version__ = "0.12.23.post2"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
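# --- Quickstart sketch (added; the data path is hypothetical and an
# embedding/LLM backend must be configured via Settings) ---
#     docs = SimpleDirectoryReader("data/").load_data()
#     index = VectorStoreIndex.from_documents(docs)
#     print(index.as_query_engine().query("What is this document about?"))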
|
"""Init file of LlamaIndex."""
__version__ = "0.12.22"
import logging
from logging import NullHandler
from typing import Callable, Optional
try:
# Force pants to install eval_type_backport on 3.9
import eval_type_backport # noqa # type: ignore
except ImportError:
pass
# response
from llama_index.core.base.response.schema import Response
# import global eval handler
from llama_index.core.callbacks.global_handlers import set_global_handler
from llama_index.core.data_structs.struct_type import IndexStructType
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
# indices
# loading
from llama_index.core.indices import (
ComposableGraph,
DocumentSummaryIndex,
GPTDocumentSummaryIndex,
GPTKeywordTableIndex,
GPTListIndex,
GPTRAKEKeywordTableIndex,
GPTSimpleKeywordTableIndex,
GPTTreeIndex,
GPTVectorStoreIndex,
KeywordTableIndex,
KnowledgeGraphIndex,
ListIndex,
PropertyGraphIndex,
RAKEKeywordTableIndex,
SimpleKeywordTableIndex,
SummaryIndex,
TreeIndex,
VectorStoreIndex,
load_graph_from_storage,
load_index_from_storage,
load_indices_from_storage,
)
# structured
from llama_index.core.indices.common.struct_store.base import (
SQLDocumentContextBuilder,
)
# prompt helper
from llama_index.core.indices.prompt_helper import PromptHelper
# prompts
from llama_index.core.prompts import (
BasePromptTemplate,
ChatPromptTemplate,
# backwards compatibility
Prompt,
PromptTemplate,
SelectorPromptTemplate,
)
from llama_index.core.readers import SimpleDirectoryReader, download_loader
# Response Synthesizer
from llama_index.core.response_synthesizers.factory import get_response_synthesizer
from llama_index.core.schema import Document, QueryBundle
from llama_index.core.service_context import (
ServiceContext,
set_global_service_context,
)
# global settings
from llama_index.core.settings import Settings
# storage
from llama_index.core.storage.storage_context import StorageContext
# sql wrapper
from llama_index.core.utilities.sql_wrapper import SQLDatabase
# global tokenizer
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
# best practices for library logging:
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
logging.getLogger(__name__).addHandler(NullHandler())
__all__ = [
"StorageContext",
"ServiceContext",
"ComposableGraph",
# indices
"SummaryIndex",
"VectorStoreIndex",
"SimpleKeywordTableIndex",
"KeywordTableIndex",
"RAKEKeywordTableIndex",
"TreeIndex",
"DocumentSummaryIndex",
"KnowledgeGraphIndex",
"PropertyGraphIndex",
# indices - legacy names
"GPTKeywordTableIndex",
"GPTKnowledgeGraphIndex",
"GPTSimpleKeywordTableIndex",
"GPTRAKEKeywordTableIndex",
"GPTListIndex",
"ListIndex",
"GPTTreeIndex",
"GPTVectorStoreIndex",
"GPTDocumentSummaryIndex",
"Prompt",
"PromptTemplate",
"BasePromptTemplate",
"ChatPromptTemplate",
"SelectorPromptTemplate",
"SummaryPrompt",
"TreeInsertPrompt",
"TreeSelectPrompt",
"TreeSelectMultiplePrompt",
"RefinePrompt",
"QuestionAnswerPrompt",
"KeywordExtractPrompt",
"QueryKeywordExtractPrompt",
"Response",
"Document",
"SimpleDirectoryReader",
"VellumPredictor",
"VellumPromptRegistry",
"MockEmbedding",
"SQLDatabase",
"SQLDocumentContextBuilder",
"SQLContextBuilder",
"PromptHelper",
"IndexStructType",
"download_loader",
"load_graph_from_storage",
"load_index_from_storage",
"load_indices_from_storage",
"QueryBundle",
"get_response_synthesizer",
"set_global_service_context",
"set_global_handler",
"set_global_tokenizer",
"get_tokenizer",
"Settings",
]
# eval global toggle
from llama_index.core.callbacks.base_handler import BaseCallbackHandler
global_handler: Optional[BaseCallbackHandler] = None
# NOTE: keep for backwards compatibility
SQLContextBuilder = SQLDocumentContextBuilder
# global tokenizer
global_tokenizer: Optional[Callable[[str], list]] = None
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print(f"Run this script with: python {sys.argv[0]} path/to/sentences.txt")
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed sized sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info(f"Train sentences: {len(train_sentences)}")
# A regular torch DataLoader and as loss we use losses.ContrastiveTensionLossInBatchNegatives
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info(f"Warmup-steps: {warmup_steps}")
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False, # Set to True, if your GPU supports FP16 cores
)
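# --- Example invocation (added; corpus path and run name are hypothetical) ---
#   python train_ct_from_file.py data/sentences.txt.gz my-run
# Plain-text and gzip-compressed inputs are both accepted (see the open() above).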
|
"""
This file loads sentences from a provided text file. It is expected that there is one sentence per line in that text file.
CT will be trained using these sentences. Checkpoints are stored every 500 steps in the output folder.
Usage:
python train_ct_from_file.py path/to/sentences.txt
"""
import gzip
import logging
import math
import sys
from datetime import datetime
import tqdm
from torch.utils.data import DataLoader
from sentence_transformers import LoggingHandler, SentenceTransformer, losses, models
#### Just some code to print debug information to stdout
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()]
)
#### /print debug information to stdout
## Training parameters
model_name = "distilbert-base-uncased"
batch_size = 128
num_epochs = 1
max_seq_length = 75
# Input file path (a text file, each line a sentence)
if len(sys.argv) < 2:
print("Run this script with: python {} path/to/sentences.txt".format(sys.argv[0]))
exit()
filepath = sys.argv[1]
# Save path to store our model
output_name = ""
if len(sys.argv) >= 3:
output_name = "-" + sys.argv[2].replace(" ", "_").replace("/", "_").replace("\\", "_")
model_output_path = "output/train_ct-improved{}-{}".format(output_name, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
# Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings
word_embedding_model = models.Transformer(model_name, max_seq_length=max_seq_length)
# Apply mean pooling to get one fixed-size sentence vector
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
################# Read the train corpus #################
train_sentences = []
with gzip.open(filepath, "rt", encoding="utf8") if filepath.endswith(".gz") else open(
filepath, encoding="utf8"
) as fIn:
for line in tqdm.tqdm(fIn, desc="Read file"):
line = line.strip()
if len(line) >= 10:
train_sentences.append(line)
logging.info("Train sentences: {}".format(len(train_sentences)))
# We use a regular torch DataLoader and losses.ContrastiveTensionLossInBatchNegatives as the loss
train_dataloader = DataLoader(train_sentences, batch_size=batch_size, shuffle=True, drop_last=True)
train_loss = losses.ContrastiveTensionLossInBatchNegatives(model)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logging.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=num_epochs,
warmup_steps=warmup_steps,
optimizer_params={"lr": 5e-5},
checkpoint_path=model_output_path,
show_progress_bar=True,
use_amp=False,  # Set to True if your GPU supports FP16
)
|
"""Unit tests for verifying event dispatching.
Much of this code is indirectly tested already through many end-to-end tests
that generate traces based on the callbacks. The traces are all verified
via snapshot testing (e.g., see unit tests for runnables).
"""
import contextvars
from contextlib import asynccontextmanager
from typing import Any, Optional
from uuid import UUID
from typing_extensions import override
from langchain_core.callbacks import (
AsyncCallbackHandler,
AsyncCallbackManager,
BaseCallbackHandler,
)
async def test_inline_handlers_share_parent_context() -> None:
"""Verify that handlers that are configured to run_inline can update parent context.
This test was created because some of the inline handlers were getting
their own context as the handling logic was kicked off using asyncio.gather
which does not automatically propagate the parent context (by design).
This issue was affecting only a few specific handlers:
* on_llm_start
* on_chat_model_start
which in some cases were triggered with multiple prompts and as a result
triggering multiple tasks that were launched in parallel.
"""
some_var: contextvars.ContextVar[str] = contextvars.ContextVar("some_var")
class CustomHandler(AsyncCallbackHandler):
"""A handler that sets the context variable.
The handler sets the context variable to the name of the callback that was
called.
"""
def __init__(self, *, run_inline: bool) -> None:
"""Initialize the handler."""
self.run_inline = run_inline
@override
async def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
"""Update the callstack with the name of the callback."""
some_var.set("on_llm_start")
# The manager serves as a callback dispatcher.
# It's responsible for dispatching callbacks to all registered handlers.
manager = AsyncCallbackManager(handlers=[CustomHandler(run_inline=True)])
# Check on_llm_start
some_var.set("unset")
await manager.on_llm_start({}, ["prompt 1"])
assert some_var.get() == "on_llm_start"
# Check what happens when run_inline is False
# We don't expect the context to be updated
manager2 = AsyncCallbackManager(
handlers=[
CustomHandler(run_inline=False),
]
)
some_var.set("unset")
await manager2.on_llm_start({}, ["prompt 1"])
# Will not be updated because the handler is not inline
assert some_var.get() == "unset"
async def test_inline_handlers_share_parent_context_multiple() -> None:
"""A slightly more complex variation of the test unit test above.
This unit test verifies that things work correctly when there are multiple prompts,
and multiple handlers that are configured to run inline.
"""
counter_var = contextvars.ContextVar("counter", default=0)
shared_stack = []
@asynccontextmanager
async def set_counter_var() -> Any:
token = counter_var.set(0)
try:
yield
finally:
counter_var.reset(token)
class StatefulAsyncCallbackHandler(AsyncCallbackHandler):
def __init__(self, name: str, *, run_inline: bool = True):
self.name = name
self.run_inline = run_inline
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
if self.name == "StateModifier":
current_counter = counter_var.get()
counter_var.set(current_counter + 1)
state = counter_var.get()
elif self.name == "StateReader":
state = counter_var.get()
else:
state = None
shared_stack.append(state)
await super().on_llm_start(
serialized,
prompts,
run_id=run_id,
parent_run_id=parent_run_id,
**kwargs,
)
handlers: list[BaseCallbackHandler] = [
StatefulAsyncCallbackHandler("StateModifier", run_inline=True),
StatefulAsyncCallbackHandler("StateReader", run_inline=True),
StatefulAsyncCallbackHandler("NonInlineHandler", run_inline=False),
]
prompts = ["Prompt1", "Prompt2", "Prompt3"]
async with set_counter_var():
shared_stack.clear()
manager = AsyncCallbackManager(handlers=handlers)
await manager.on_llm_start({}, prompts)
# Assert the order of states
states = [entry for entry in shared_stack if entry is not None]
assert states == [
1,
1,
2,
2,
3,
3,
], f"Expected order of states was broken due to context loss. Got {states}"
|
"""Unit tests for verifying event dispatching.
Much of this code is indirectly tested already through many end-to-end tests
that generate traces based on the callbacks. The traces are all verified
via snapshot testing (e.g., see unit tests for runnables).
"""
import contextvars
from contextlib import asynccontextmanager
from typing import Any, Optional
from uuid import UUID
from langchain_core.callbacks import (
AsyncCallbackHandler,
AsyncCallbackManager,
BaseCallbackHandler,
)
async def test_inline_handlers_share_parent_context() -> None:
"""Verify that handlers that are configured to run_inline can update parent context.
This test was created because some of the inline handlers were getting
their own context as the handling logic was kicked off using asyncio.gather
which does not automatically propagate the parent context (by design).
This issue was affecting only a few specific handlers:
* on_llm_start
* on_chat_model_start
which in some cases were triggered with multiple prompts and as a result
triggering multiple tasks that were launched in parallel.
"""
some_var: contextvars.ContextVar[str] = contextvars.ContextVar("some_var")
class CustomHandler(AsyncCallbackHandler):
"""A handler that sets the context variable.
The handler sets the context variable to the name of the callback that was
called.
"""
def __init__(self, *, run_inline: bool) -> None:
"""Initialize the handler."""
self.run_inline = run_inline
async def on_llm_start(self, *args: Any, **kwargs: Any) -> None:
"""Update the callstack with the name of the callback."""
some_var.set("on_llm_start")
# The manager serves as a callback dispatcher.
# It's responsible for dispatching callbacks to all registered handlers.
manager = AsyncCallbackManager(handlers=[CustomHandler(run_inline=True)])
# Check on_llm_start
some_var.set("unset")
await manager.on_llm_start({}, ["prompt 1"])
assert some_var.get() == "on_llm_start"
# Check what happens when run_inline is False
# We don't expect the context to be updated
manager2 = AsyncCallbackManager(
handlers=[
CustomHandler(run_inline=False),
]
)
some_var.set("unset")
await manager2.on_llm_start({}, ["prompt 1"])
# Will not be updated because the handler is not inline
assert some_var.get() == "unset"
async def test_inline_handlers_share_parent_context_multiple() -> None:
"""A slightly more complex variation of the test unit test above.
This unit test verifies that things work correctly when there are multiple prompts,
and multiple handlers that are configured to run inline.
"""
counter_var = contextvars.ContextVar("counter", default=0)
shared_stack = []
@asynccontextmanager
async def set_counter_var() -> Any:
token = counter_var.set(0)
try:
yield
finally:
counter_var.reset(token)
class StatefulAsyncCallbackHandler(AsyncCallbackHandler):
def __init__(self, name: str, *, run_inline: bool = True):
self.name = name
self.run_inline = run_inline
async def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
if self.name == "StateModifier":
current_counter = counter_var.get()
counter_var.set(current_counter + 1)
state = counter_var.get()
elif self.name == "StateReader":
state = counter_var.get()
else:
state = None
shared_stack.append(state)
await super().on_llm_start(
serialized,
prompts,
run_id=run_id,
parent_run_id=parent_run_id,
**kwargs,
)
handlers: list[BaseCallbackHandler] = [
StatefulAsyncCallbackHandler("StateModifier", run_inline=True),
StatefulAsyncCallbackHandler("StateReader", run_inline=True),
StatefulAsyncCallbackHandler("NonInlineHandler", run_inline=False),
]
prompts = ["Prompt1", "Prompt2", "Prompt3"]
async with set_counter_var():
shared_stack.clear()
manager = AsyncCallbackManager(handlers=handlers)
await manager.on_llm_start({}, prompts)
# Assert the order of states
states = [entry for entry in shared_stack if entry is not None]
assert states == [
1,
1,
2,
2,
3,
3,
], f"Expected order of states was broken due to context loss. Got {states}"
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .ema import ExpMomentumEMA
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import (empty_instances, interpolate_as, sigmoid_geometric_mean,
unpack_gt_instances)
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty',
'ExpMomentumEMA', 'unpack_gt_instances', 'empty_instances'
]
|
# Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .ema import ExpMomentumEMA
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty',
'ExpMomentumEMA'
]
|
from abc import ABC, abstractmethod
from typing import Callable
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts import BasePromptTemplate
from pydantic import BaseModel, Field
class BasePromptSelector(BaseModel, ABC):
"""Base class for prompt selectors."""
@abstractmethod
def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate:
"""Get default prompt for a language model."""
class ConditionalPromptSelector(BasePromptSelector):
"""Prompt collection that goes through conditionals."""
default_prompt: BasePromptTemplate
"""Default prompt to use if no conditionals match."""
conditionals: list[
tuple[Callable[[BaseLanguageModel], bool], BasePromptTemplate]
] = Field(default_factory=list)
"""List of conditionals and prompts to use if the conditionals match."""
def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate:
"""Get default prompt for a language model.
Args:
llm: Language model to get prompt for.
Returns:
Prompt to use for the language model.
"""
for condition, prompt in self.conditionals:
if condition(llm):
return prompt
return self.default_prompt
def is_llm(llm: BaseLanguageModel) -> bool:
"""Check if the language model is a LLM.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseLLM model, False otherwise.
"""
return isinstance(llm, BaseLLM)
def is_chat_model(llm: BaseLanguageModel) -> bool:
"""Check if the language model is a chat model.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseChatModel model, False otherwise.
"""
return isinstance(llm, BaseChatModel)
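# Hedged usage sketch (not part of the original module): picking a prompt
# based on the model type. The templates are illustrative assumptions;
# PromptTemplate comes from langchain_core.prompts.
#
#   from langchain_core.prompts import PromptTemplate
#
#   selector = ConditionalPromptSelector(
#       default_prompt=PromptTemplate.from_template("Answer: {question}"),
#       conditionals=[
#           (is_chat_model,
#            PromptTemplate.from_template("You are a helpful assistant. {question}")),
#       ],
#   )
#   prompt = selector.get_prompt(llm)  # `llm` is any BaseLanguageModel instance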
|
from abc import ABC, abstractmethod
from typing import Callable, List, Tuple
from langchain_core.language_models import BaseLanguageModel
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import BaseLLM
from langchain_core.prompts import BasePromptTemplate
from pydantic import BaseModel, Field
class BasePromptSelector(BaseModel, ABC):
"""Base class for prompt selectors."""
@abstractmethod
def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate:
"""Get default prompt for a language model."""
class ConditionalPromptSelector(BasePromptSelector):
"""Prompt collection that goes through conditionals."""
default_prompt: BasePromptTemplate
"""Default prompt to use if no conditionals match."""
conditionals: List[
Tuple[Callable[[BaseLanguageModel], bool], BasePromptTemplate]
] = Field(default_factory=list)
"""List of conditionals and prompts to use if the conditionals match."""
def get_prompt(self, llm: BaseLanguageModel) -> BasePromptTemplate:
"""Get default prompt for a language model.
Args:
llm: Language model to get prompt for.
Returns:
Prompt to use for the language model.
"""
for condition, prompt in self.conditionals:
if condition(llm):
return prompt
return self.default_prompt
def is_llm(llm: BaseLanguageModel) -> bool:
"""Check if the language model is a LLM.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseLLM model, False otherwise.
"""
return isinstance(llm, BaseLLM)
def is_chat_model(llm: BaseLanguageModel) -> bool:
"""Check if the language model is a chat model.
Args:
llm: Language model to check.
Returns:
True if the language model is a BaseChatModel model, False otherwise.
"""
return isinstance(llm, BaseChatModel)
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import BearlyInterpreterTool
from langchain_community.tools.bearly.tool import (
BearlyInterpreterToolArguments,
FileInfo,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BearlyInterpreterToolArguments": "langchain_community.tools.bearly.tool",
"FileInfo": "langchain_community.tools.bearly.tool",
"BearlyInterpreterTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BearlyInterpreterTool",
"BearlyInterpreterToolArguments",
"FileInfo",
]
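# Hedged usage sketch: accessing one of the names above on this module routes
# through __getattr__, which resolves it from langchain_community and emits a
# deprecation warning (the behavior provided by create_importer). The module
# path below is an assumption about where this file lives.
#
#   from langchain.tools.bearly.tool import BearlyInterpreterTool  # resolved dynamically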
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import BearlyInterpreterTool
from langchain_community.tools.bearly.tool import (
BearlyInterpreterToolArguments,
FileInfo,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"BearlyInterpreterToolArguments": "langchain_community.tools.bearly.tool",
"FileInfo": "langchain_community.tools.bearly.tool",
"BearlyInterpreterTool": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"BearlyInterpreterToolArguments",
"FileInfo",
"BearlyInterpreterTool",
]
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow>=10.0.1,<=15.0",
"accelerate": "accelerate>=0.26.0",
"av": "av",
"beautifulsoup4": "beautifulsoup4",
"blobfile": "blobfile",
"codecarbon": "codecarbon>=2.8.1",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"hf_xet": "hf_xet",
"huggingface-hub": "huggingface-hub>=0.30.0,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1,<=0.4.13",
"jaxlib": "jaxlib>=0.4.1,<=0.4.13",
"jieba": "jieba",
"jinja2": "jinja2>=3.1.0",
"kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5": "kenlm@git+https://github.com/ydshieh/kenlm@78f664fb3dafe1468d868d71faf19534530698d5",
"keras": "keras>2.9,<2.16",
"keras-nlp": "keras-nlp>=0.3.1,<0.14.0",
"kernels": "kernels>=0.4.4,<0.5",
"librosa": "librosa",
"natten": "natten>=0.14.6,<0.15.0",
"nltk": "nltk<=3.8.1",
"num2words": "num2words",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optimum-benchmark": "optimum-benchmark>=0.3.0",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",
"pytest": "pytest>=7.2.0,<8.0.0",
"pytest-asyncio": "pytest-asyncio",
"pytest-rerunfailures": "pytest-rerunfailures",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"pytest-order": "pytest-order",
"python": "python>=3.9.0",
"ray[tune]": "ray[tune]>=2.7.0",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff==0.11.2",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.4.3",
"sagemaker": "sagemaker>=2.31.0",
"schedulefree": "schedulefree>=1.2.6",
"scikit-learn": "scikit-learn",
"scipy": "scipy<1.13.0",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorboard": "tensorboard",
"tensorflow-cpu": "tensorflow-cpu>2.9,<2.16",
"tensorflow": "tensorflow>2.9,<2.16",
"tensorflow-text": "tensorflow-text<2.16",
"tensorflow-probability": "tensorflow-probability<0.24",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"tiktoken": "tiktoken",
"timm": "timm<=1.0.11",
"tokenizers": "tokenizers>=0.21,<0.22",
"torch": "torch>=2.1,<2.7",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
"pytest-rich": "pytest-rich",
"libcst": "libcst",
"rich": "rich",
}
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
Args:
num_classes (int): Number of classes, including all stuff
classes and one thing class.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features; the
`end_level`-th layer will not be used.
fg_range (tuple): Range of the foreground classes.
bg_range (tuple): Range of the background classes.
conv_cfg (dict): Dictionary to construct and config
conv layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use ``GN`` by default.
init_cfg (dict or list[dict], optional): Initialization config dict.
loss_seg (dict): The loss of the semantic head.
"""
def __init__(self,
num_classes,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
fg_range=(1, 80),
bg_range=(81, 133),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)
self.fg_range = fg_range
self.bg_range = bg_range
self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1
self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg):
"""Merge thing classes to one class."""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (
gt_semantic_seg <= self.fg_range[1])
bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (
gt_semantic_seg <= self.bg_range[1])
new_gt_seg = fg_mask.int() * (self.bg_nums + 1)
new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,
new_gt_seg)
return new_gt_seg
def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):
"""The loss of PanopticFPN head.
Things classes will be merged to one class in PanopticFPN.
"""
gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
return super().loss(seg_preds, gt_semantic_seg, label_bias)
def init_weights(self):
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x):
# The number of subnets must not be more than
# the number of input feature levels.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(feats)
out = dict(seg_preds=seg_preds, feats=feats)
return out
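# Hedged usage sketch (values are illustrative assumptions following the
# COCO panoptic convention implied by the defaults above: 80 thing classes in
# fg_range=(1, 80) and 53 stuff classes in bg_range=(81, 133), so num_classes
# is 53 stuff classes plus one merged thing class):
#
#   head = PanopticFPNHead(num_classes=54, in_channels=256, inner_channels=128)
#   # feats: a tuple of FPN feature maps, one per level in [start_level, end_level)
#   # out = head(feats)  # dict(seg_preds=..., feats=...)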
|
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
Args:
num_classes (int): Number of classes, including all stuff
classes and one thing class.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features; the
`end_level`-th layer will not be used.
fg_range (tuple): Range of the foreground classes.
bg_range (tuple): Range of the background classes.
conv_cfg (dict): Dictionary to construct and config
conv layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use ``GN`` by default.
init_cfg (dict or list[dict], optional): Initialization config dict.
loss_seg (dict): The loss of the semantic head.
"""
def __init__(self,
num_classes,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
fg_range=(1, 80),
bg_range=(81, 133),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)
self.fg_range = fg_range
self.bg_range = bg_range
self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1
self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg):
"""Merge thing classes to one class."""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (
gt_semantic_seg <= self.fg_range[1])
bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (
gt_semantic_seg <= self.bg_range[1])
new_gt_seg = fg_mask.int() * (self.bg_nums + 1)
new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,
new_gt_seg)
return new_gt_seg
def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):
"""The loss of PanopticFPN head.
Things classes will be merged to one class in PanopticFPN.
"""
gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
return super().loss(seg_preds, gt_semantic_seg, label_bias)
def init_weights(self):
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x):
# The number of subnets must not be more than
# the number of input feature levels.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(feats)
out = dict(seg_preds=seg_preds, feats=feats)
return out
|
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import ConsisIDTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class ConsisIDTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = ConsisIDTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
id_vit_hidden = [torch.ones([batch_size, 2, 2]).to(torch_device)] * 1
id_cond = torch.ones(batch_size, 2).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"id_vit_hidden": id_vit_hidden,
"id_cond": id_cond,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"cross_attn_interval": 1,
"is_kps": False,
"is_train_face": True,
"cross_attn_dim_head": 1,
"cross_attn_num_heads": 1,
"LFE_id_dim": 2,
"LFE_vit_dim": 2,
"LFE_depth": 5,
"LFE_dim_head": 8,
"LFE_num_heads": 2,
"LFE_num_id_token": 1,
"LFE_num_querie": 1,
"LFE_output_dim": 10,
"LFE_ff_mult": 1,
"LFE_num_scale": 1,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"ConsisIDTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import ConsisIDTransformer3DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class ConsisIDTransformerTests(ModelTesterMixin, unittest.TestCase):
model_class = ConsisIDTransformer3DModel
main_input_name = "hidden_states"
uses_custom_attn_processor = True
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
num_frames = 1
height = 8
width = 8
embedding_dim = 8
sequence_length = 8
hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)
id_vit_hidden = [torch.ones([batch_size, 2, 2]).to(torch_device)] * 1
id_cond = torch.ones(batch_size, 2).to(torch_device)
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"timestep": timestep,
"id_vit_hidden": id_vit_hidden,
"id_cond": id_cond,
}
@property
def input_shape(self):
return (1, 4, 8, 8)
@property
def output_shape(self):
return (1, 4, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"num_attention_heads": 2,
"attention_head_dim": 8,
"in_channels": 4,
"out_channels": 4,
"time_embed_dim": 2,
"text_embed_dim": 8,
"num_layers": 1,
"sample_width": 8,
"sample_height": 8,
"sample_frames": 8,
"patch_size": 2,
"temporal_compression_ratio": 4,
"max_text_seq_length": 8,
"cross_attn_interval": 1,
"is_kps": False,
"is_train_face": True,
"cross_attn_dim_head": 1,
"cross_attn_num_heads": 1,
"LFE_id_dim": 2,
"LFE_vit_dim": 2,
"LFE_depth": 5,
"LFE_dim_head": 8,
"LFE_num_heads": 2,
"LFE_num_id_token": 1,
"LFE_num_querie": 1,
"LFE_output_dim": 10,
"LFE_ff_mult": 1,
"LFE_num_scale": 1,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_gradient_checkpointing_is_applied(self):
expected_set = {"ConsisIDTransformer3DModel"}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
from typing import TYPE_CHECKING, Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and getting back a natural language response.
"""
class QueryEngineTool(AsyncBaseTool):
"""
Query engine tool.
A tool making use of a query engine.
Args:
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
resolve_input_errors: bool = True,
) -> None:
self._query_engine = query_engine
self._metadata = metadata
self._resolve_input_errors = resolve_input_errors
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
resolve_input_errors: bool = True,
) -> "QueryEngineTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(
name=name, description=description, return_direct=return_direct
)
return cls(
query_engine=query_engine,
metadata=metadata,
resolve_input_errors=resolve_input_errors,
)
@property
def query_engine(self) -> BaseQueryEngine:
return self._query_engine
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = self._get_query_str(*args, **kwargs)
response = self._query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.get_name(),
raw_input={"input": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = self._get_query_str(*args, **kwargs)
response = await self._query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.get_name(),
raw_input={"input": query_str},
raw_output=response,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
from llama_index.core.langchain_helpers.agents.tools import (
IndexToolConfig,
LlamaIndexTool,
)
tool_config = IndexToolConfig(
query_engine=self.query_engine,
name=self.metadata.get_name(),
description=self.metadata.description,
)
return LlamaIndexTool.from_tool_config(tool_config=tool_config)
def _get_query_str(self, *args: Any, **kwargs: Any) -> str:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError(
"Cannot call query engine without specifying `input` parameter."
)
return query_str
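# Hedged usage sketch (assumes `index` is an existing llama_index index such
# as a VectorStoreIndex, so index.as_query_engine() yields a BaseQueryEngine):
#
#   tool = QueryEngineTool.from_defaults(
#       query_engine=index.as_query_engine(),
#       name="docs",
#       description="Query the documentation knowledge base.",
#   )
#   output = tool("What does the setup guide say?")  # returns a ToolOutput
#   print(output.content)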
|
from typing import TYPE_CHECKING, Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and getting back a natural language response.
"""
class QueryEngineTool(AsyncBaseTool):
"""
Query engine tool.
A tool making use of a query engine.
Args:
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
resolve_input_errors: bool = True,
) -> None:
self._query_engine = query_engine
self._metadata = metadata
self._resolve_input_errors = resolve_input_errors
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
resolve_input_errors: bool = True,
) -> "QueryEngineTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(
name=name, description=description, return_direct=return_direct
)
return cls(
query_engine=query_engine,
metadata=metadata,
resolve_input_errors=resolve_input_errors,
)
@property
def query_engine(self) -> BaseQueryEngine:
return self._query_engine
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = self._get_query_str(*args, **kwargs)
response = self._query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
query_str = self._get_query_str(*args, **kwargs)
response = await self._query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
from llama_index.core.langchain_helpers.agents.tools import (
IndexToolConfig,
LlamaIndexTool,
)
tool_config = IndexToolConfig(
query_engine=self.query_engine,
name=self.metadata.name,
description=self.metadata.description,
)
return LlamaIndexTool.from_tool_config(tool_config=tool_config)
def _get_query_str(self, *args: Any, **kwargs: Any) -> str:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError(
"Cannot call query engine without specifying `input` parameter."
)
return query_str
|
from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
__all__ = ['GRPCGateway']
|
from jina.serve.runtimes.gateway.grpc.gateway import GRPCGateway
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.int64)  # np.long was removed in NumPy >= 1.24
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(2, 5 * 5, 4)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
components = np.zeros_like(loss, dtype=np.int64)  # np.long was removed in NumPy >= 1.24
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
mlvl_anchors = [torch.ones(2, 5 * 5, 4)]
img_shape = None
scale_factor = [0.5, 0.5]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self._get_bboxes(
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=rescale)
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataElement
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataElement]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataElement]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
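# Hedged usage sketch: in mmengine-style configs this hook is typically
# enabled through the default hooks dict; the exact config key is an
# assumption based on common mmengine conventions.
#
#   default_hooks = dict(param_scheduler=dict(type='ParamSchedulerHook'))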
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, Optional, Sequence, Tuple
from mmengine.data import BaseDataSample
from mmengine.registry import HOOKS
from .hook import Hook
DATA_BATCH = Optional[Sequence[Tuple[Any, BaseDataSample]]]
@HOOKS.register_module()
class ParamSchedulerHook(Hook):
"""A hook to update some hyper-parameters in optimizer, e.g., learning rate
and momentum."""
priority = 'LOW'
def after_train_iter(self,
runner,
batch_idx: int,
data_batch: DATA_BATCH = None,
outputs: Optional[dict] = None) -> None:
"""Call step function for each scheduler after each iteration.
Args:
runner (Runner): The runner of the training process.
batch_idx (int): The index of the current batch in the train loop.
data_batch (Sequence[Tuple[Any, BaseDataSample]], optional): Data
from dataloader. In order to keep this interface consistent
with other hooks, we keep ``data_batch`` here.
Defaults to None.
outputs (dict, optional): Outputs from model.
In order to keep this interface consistent with other hooks, we
keep ``outputs`` here. Defaults to None.
"""
for scheduler in runner.param_schedulers: # type: ignore
if not scheduler.by_epoch:
scheduler.step()
def after_train_epoch(self, runner) -> None:
"""Call step function for each scheduler after each epoch.
Args:
runner (Runner): The runner of the training process.
"""
for scheduler in runner.param_schedulers: # type: ignore
if scheduler.by_epoch:
scheduler.step()
|
import subprocess
import numpy as np
import pytest
from jina import Document, Flow
from timm_encoder import TimmImageEncoder
def test_with_batch():
flow = Flow().add(uses=TimmImageEncoder)
with flow:
resp = flow.post(
on="/test",
inputs=(
Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)
),
return_results=True,
)
assert len(resp[0].docs.get_attributes("embedding")) == 25
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
["jina", "executor", f"--uses=docker://{build_docker_image}"],
timeout=30,
check=True,
)
@pytest.mark.gpu
@pytest.mark.docker
def test_docker_runtime_gpu(build_docker_image_gpu: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
[
'jina',
'executor',
f'--uses=docker://{build_docker_image_gpu}',
'--gpus',
'all',
'--uses-with',
'device:cuda',
],
timeout=30,
check=True,
)
|
import subprocess
import numpy as np
import pytest
from jina import Document, Flow
from timm_encoder import TimmImageEncoder
def test_with_batch():
flow = Flow().add(uses=TimmImageEncoder)
with flow:
resp = flow.post(
on="/test",
inputs=(
Document(blob=np.ones((224, 224, 3), dtype=np.uint8)) for _ in range(25)
),
return_results=True,
)
assert len(resp[0].docs.get_attributes("embedding")) == 25
for r in resp:
for doc in r.docs:
assert doc.embedding is not None
assert doc.embedding.shape == (512,)
@pytest.mark.docker
def test_docker_runtime(build_docker_image: str):
with pytest.raises(subprocess.TimeoutExpired):
subprocess.run(
["jina", "executor", f"--uses=docker://{build_docker_image}"],
timeout=30,
check=True,
)
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
self.built = True
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
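# Hedged round-trip sketch: ``get_config`` serializes the activation by name, so
# an equivalent layer can be rebuilt with ``from_config`` (inherited from ``Layer``):
# layer = Activation("relu", name="act")
# restored = Activation.from_config(layer.get_config())  # same activation, same name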
|
from keras.src import activations
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Activation")
class Activation(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function. It could be a callable, or the name of
an activation from the `keras.activations` namespace.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Example:
>>> layer = keras.layers.Activation('relu')
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
>>> layer = keras.layers.Activation(keras.activations.relu)
>>> layer([-3.0, -1.0, 0.0, 2.0])
[0.0, 0.0, 0.0, 2.0]
"""
def __init__(self, activation, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {"activation": activations.serialize(self.activation)}
base_config = super().get_config()
return {**base_config, **config}
|
"""Test retriever tool."""
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle
from llama_index.core.tools import RetrieverTool
from llama_index.core.postprocessor.types import BaseNodePostprocessor
import pytest
class MockRetriever(BaseRetriever):
"""Custom retriever for testing."""
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Mock retrieval."""
return [
NodeWithScore(
node=TextNode(
text=f"mock_{query_bundle}",
text_template="Metadata:\n{metadata_str}\n\nContent:\n{content}",
metadata_template="- {key}: {value}",
metadata={"key": "value"},
),
score=0.9,
)
]
class MockPostProcessor(BaseNodePostprocessor):
@classmethod
def class_name(cls) -> str:
return "CitationPostProcessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
for n in nodes:
prefix = f"processed_"
n.node.text = prefix + n.node.text
return nodes
def test_retriever_tool() -> None:
"""Test retriever tool."""
# Test retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = retriever_tool("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = pr_retriever_tool("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
@pytest.mark.asyncio()
async def test_retriever_tool_async() -> None:
"""Test retriever tool async call."""
# Test async retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = await retriever_tool.acall("hello world")
assert (
str(response_nodes)
== "Metadata:\n- key: value\n\nContent:\nmock_hello world\n\n"
)
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors async
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = await pr_retriever_tool.acall("hello world")
assert (
str(pr_response_nodes)
== "Metadata:\n- key: value\n\nContent:\nprocessed_mock_hello world\n\n"
)
|
"""Test retriever tool."""
from typing import List, Optional
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.schema import NodeWithScore, TextNode, QueryBundle
from llama_index.core.tools import RetrieverTool
from llama_index.core.postprocessor.types import BaseNodePostprocessor
class MockRetriever(BaseRetriever):
"""Custom retriever for testing."""
def _retrieve(self, query_str: str) -> List[NodeWithScore]:
"""Mock retrieval."""
return [NodeWithScore(node=TextNode(text=f"mock_{query_str}"), score=0.9)]
async def _aretrieve(self, query_str: str) -> List[NodeWithScore]:
"""Mock retrieval."""
return [NodeWithScore(node=TextNode(text=f"mock_{query_str}"), score=0.9)]
class MockPostProcessor(BaseNodePostprocessor):
@classmethod
def class_name(cls) -> str:
return "CitationPostProcessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
for n in nodes:
prefix = f"processed_"
n.node.text = prefix + n.node.text
return nodes
def test_retriever_tool() -> None:
"""Test retriever tool."""
# Test retrieval
retriever = MockRetriever()
retriever_tool = RetrieverTool.from_defaults(retriever=retriever)
response_nodes = retriever_tool("hello world")
assert str(response_nodes) == "mock_hello world\n\n\n\n"
assert response_nodes.raw_output[0].node.text == "mock_hello world\n\n"
# Test node_postprocessors
node_postprocessors = [MockPostProcessor()]
pr_retriever_tool = RetrieverTool.from_defaults(
retriever=retriever, node_postprocessors=node_postprocessors
)
pr_response_nodes = pr_retriever_tool("hello world")
assert str(pr_response_nodes) == "processed_mock_hello world\n\n\n\n"
|
from typing import Optional
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDoc):
sub_sub_text: TextDoc
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDoc):
sub_text: TextDoc
sub_da: DocList[SubSubDoc]
class MultiModalDoc(BaseDoc):
mm_text: TextDoc
mm_tensor: Optional[TorchTensor[3, 2, 2]] = None
mm_da: DocList[SubDoc]
docs = DocList[MultiModalDoc](
[
MultiModalDoc(
mm_text=TextDoc(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=TextDoc(text=f'sub_{i}_1'),
sub_da=DocList[SubSubDoc](
[
SubSubDoc(
sub_sub_text=TextDoc(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.to_doc_vec()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDoc()
input_list = [DocList([doc, doc, doc])]
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
from typing import Optional
import pytest
import torch
from docarray import BaseDoc, DocList
from docarray.array.any_array import AnyDocArray
from docarray.documents import TextDoc
from docarray.typing import TorchTensor
num_docs = 5
num_sub_docs = 2
num_sub_sub_docs = 3
@pytest.fixture
def multi_model_docs():
class SubSubDoc(BaseDoc):
sub_sub_text: TextDoc
sub_sub_tensor: TorchTensor[2]
class SubDoc(BaseDoc):
sub_text: TextDoc
sub_da: DocList[SubSubDoc]
class MultiModalDoc(BaseDoc):
mm_text: TextDoc
mm_tensor: Optional[TorchTensor[3, 2, 2]]
mm_da: DocList[SubDoc]
docs = DocList[MultiModalDoc](
[
MultiModalDoc(
mm_text=TextDoc(text=f'hello{i}'),
mm_da=[
SubDoc(
sub_text=TextDoc(text=f'sub_{i}_1'),
sub_da=DocList[SubSubDoc](
[
SubSubDoc(
sub_sub_text=TextDoc(text='subsub'),
sub_sub_tensor=torch.zeros(2),
)
for _ in range(num_sub_sub_docs)
]
),
)
for _ in range(num_sub_docs)
],
)
for i in range(num_docs)
]
)
return docs
@pytest.mark.parametrize(
'access_path,len_result',
[
('mm_text', num_docs), # List of 5 Text objs
('mm_text__text', num_docs), # List of 5 strings
('mm_da', num_docs * num_sub_docs), # List of 5 * 2 SubDoc objs
('mm_da__sub_text', num_docs * num_sub_docs), # List of 5 * 2 Text objs
(
'mm_da__sub_da',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 SubSubDoc objs
(
'mm_da__sub_da__sub_sub_text',
num_docs * num_sub_docs * num_sub_sub_docs,
), # List of 5 * 2 * 3 Text objs
],
)
def test_traverse_flat(multi_model_docs, access_path, len_result):
traversed = multi_model_docs.traverse_flat(access_path)
assert len(traversed) == len_result
def test_traverse_stacked_da():
class Image(BaseDoc):
tensor: TorchTensor[3, 224, 224]
batch = DocList[Image](
[
Image(
tensor=torch.zeros(3, 224, 224),
)
for _ in range(2)
]
)
batch_stacked = batch.to_doc_vec()
tensors = batch_stacked.traverse_flat(access_path='tensor')
assert tensors.shape == (2, 3, 224, 224)
assert isinstance(tensors, torch.Tensor)
@pytest.mark.parametrize(
'input_list,output_list',
[
([1, 2, 3], [1, 2, 3]),
([[1], [2], [3]], [1, 2, 3]),
([[[1]], [[2]], [[3]]], [[1], [2], [3]]),
],
)
def test_flatten_one_level(input_list, output_list):
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == output_list
def test_flatten_one_level_list_of_da():
doc = BaseDoc()
input_list = [DocList([doc, doc, doc])]
flattened = AnyDocArray._flatten_one_level(sequence=input_list)
assert flattened == [doc, doc, doc]
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GuidedAnchorHead
def test_ga_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False))
head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
if torch.cuda.is_available():
head.cuda()
feat = [
torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
import mmcv
import torch
from mmdet.models.dense_heads import GuidedAnchorHead
def test_ga_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False))
head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
if torch.cuda.is_available():
head.cuda()
feat = [
torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_flax_available,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_pag_controlnet_sd"] = ["StableDiffusionControlNetPAGPipeline"]
_import_structure["pipeline_pag_controlnet_sd_inpaint"] = ["StableDiffusionControlNetPAGInpaintPipeline"]
_import_structure["pipeline_pag_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPAGPipeline"]
_import_structure["pipeline_pag_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_hunyuandit"] = ["HunyuanDiTPAGPipeline"]
_import_structure["pipeline_pag_kolors"] = ["KolorsPAGPipeline"]
_import_structure["pipeline_pag_pixart_sigma"] = ["PixArtSigmaPAGPipeline"]
_import_structure["pipeline_pag_sd"] = ["StableDiffusionPAGPipeline"]
_import_structure["pipeline_pag_sd_3"] = ["StableDiffusion3PAGPipeline"]
_import_structure["pipeline_pag_sd_3_img2img"] = ["StableDiffusion3PAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_animatediff"] = ["AnimateDiffPAGPipeline"]
_import_structure["pipeline_pag_sd_img2img"] = ["StableDiffusionPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_inpaint"] = ["StableDiffusionPAGInpaintPipeline"]
_import_structure["pipeline_pag_sd_xl"] = ["StableDiffusionXLPAGPipeline"]
_import_structure["pipeline_pag_sd_xl_img2img"] = ["StableDiffusionXLPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_xl_inpaint"] = ["StableDiffusionXLPAGInpaintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_pag_controlnet_sd import StableDiffusionControlNetPAGPipeline
from .pipeline_pag_controlnet_sd_inpaint import StableDiffusionControlNetPAGInpaintPipeline
from .pipeline_pag_controlnet_sd_xl import StableDiffusionXLControlNetPAGPipeline
from .pipeline_pag_controlnet_sd_xl_img2img import StableDiffusionXLControlNetPAGImg2ImgPipeline
from .pipeline_pag_hunyuandit import HunyuanDiTPAGPipeline
from .pipeline_pag_kolors import KolorsPAGPipeline
from .pipeline_pag_pixart_sigma import PixArtSigmaPAGPipeline
from .pipeline_pag_sd import StableDiffusionPAGPipeline
from .pipeline_pag_sd_3 import StableDiffusion3PAGPipeline
from .pipeline_pag_sd_3_img2img import StableDiffusion3PAGImg2ImgPipeline
from .pipeline_pag_sd_animatediff import AnimateDiffPAGPipeline
from .pipeline_pag_sd_img2img import StableDiffusionPAGImg2ImgPipeline
from .pipeline_pag_sd_inpaint import StableDiffusionPAGInpaintPipeline
from .pipeline_pag_sd_xl import StableDiffusionXLPAGPipeline
from .pipeline_pag_sd_xl_img2img import StableDiffusionXLPAGImg2ImgPipeline
from .pipeline_pag_sd_xl_inpaint import StableDiffusionXLPAGInpaintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
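# Hedged usage note: thanks to ``_LazyModule``, a statement such as
#     from diffusers.pipelines.pag import StableDiffusionPAGPipeline
# only imports the heavy pipeline submodule on first attribute access, so package
# import stays cheap; when torch/transformers are absent, the dummy objects
# registered above stand in and raise an informative error on use. (The dotted
# path shown is an assumption based on this file's relative imports.)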
|
from typing import TYPE_CHECKING
from ...utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_flax_available,
is_torch_available,
is_transformers_available,
)
_dummy_objects = {}
_import_structure = {}
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
_import_structure["pipeline_pag_controlnet_sd"] = ["StableDiffusionControlNetPAGPipeline"]
_import_structure["pipeline_pag_controlnet_sd_inpaint"] = ["StableDiffusionControlNetPAGInpaintPipeline"]
_import_structure["pipeline_pag_controlnet_sd_xl"] = ["StableDiffusionXLControlNetPAGPipeline"]
_import_structure["pipeline_pag_controlnet_sd_xl_img2img"] = ["StableDiffusionXLControlNetPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_hunyuandit"] = ["HunyuanDiTPAGPipeline"]
_import_structure["pipeline_pag_kolors"] = ["KolorsPAGPipeline"]
_import_structure["pipeline_pag_pixart_sigma"] = ["PixArtSigmaPAGPipeline"]
_import_structure["pipeline_pag_sd"] = ["StableDiffusionPAGPipeline"]
_import_structure["pipeline_pag_sd_3"] = ["StableDiffusion3PAGPipeline"]
_import_structure["pipeline_pag_sd_3_img2img"] = ["StableDiffusion3PAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_animatediff"] = ["AnimateDiffPAGPipeline"]
_import_structure["pipeline_pag_sd_img2img"] = ["StableDiffusionPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_xl"] = ["StableDiffusionXLPAGPipeline"]
_import_structure["pipeline_pag_sd_xl_img2img"] = ["StableDiffusionXLPAGImg2ImgPipeline"]
_import_structure["pipeline_pag_sd_xl_inpaint"] = ["StableDiffusionXLPAGInpaintPipeline"]
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import *
else:
from .pipeline_pag_controlnet_sd import StableDiffusionControlNetPAGPipeline
from .pipeline_pag_controlnet_sd_inpaint import StableDiffusionControlNetPAGInpaintPipeline
from .pipeline_pag_controlnet_sd_xl import StableDiffusionXLControlNetPAGPipeline
from .pipeline_pag_controlnet_sd_xl_img2img import StableDiffusionXLControlNetPAGImg2ImgPipeline
from .pipeline_pag_hunyuandit import HunyuanDiTPAGPipeline
from .pipeline_pag_kolors import KolorsPAGPipeline
from .pipeline_pag_pixart_sigma import PixArtSigmaPAGPipeline
from .pipeline_pag_sd import StableDiffusionPAGPipeline
from .pipeline_pag_sd_3 import StableDiffusion3PAGPipeline
from .pipeline_pag_sd_3_img2img import StableDiffusion3PAGImg2ImgPipeline
from .pipeline_pag_sd_animatediff import AnimateDiffPAGPipeline
from .pipeline_pag_sd_img2img import StableDiffusionPAGImg2ImgPipeline
from .pipeline_pag_sd_xl import StableDiffusionXLPAGPipeline
from .pipeline_pag_sd_xl_img2img import StableDiffusionXLPAGImg2ImgPipeline
from .pipeline_pag_sd_xl_inpaint import StableDiffusionXLPAGInpaintPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
for name, value in _dummy_objects.items():
setattr(sys.modules[__name__], name, value)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.2", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
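# For illustration, the resulting inverse mapping (with the pins above) looks like:
#   tag_to_packages["build"]   -> ["numpy>=1.22.0", "scipy>=1.8.0", "cython>=3.0.10", ...]
#   tag_to_packages["install"] -> ["numpy>=1.22.0", "scipy>=1.8.0", "joblib>=1.2.0", ...]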
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""All minimum dependencies for scikit-learn."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
from collections import defaultdict
# scipy and cython should be in sync with pyproject.toml
NUMPY_MIN_VERSION = "1.22.0"
SCIPY_MIN_VERSION = "1.8.0"
JOBLIB_MIN_VERSION = "1.2.0"
THREADPOOLCTL_MIN_VERSION = "3.1.0"
PYTEST_MIN_VERSION = "7.1.2"
CYTHON_MIN_VERSION = "3.0.10"
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma-separated tags)
dependent_packages = {
"numpy": (NUMPY_MIN_VERSION, "build, install"),
"scipy": (SCIPY_MIN_VERSION, "build, install"),
"joblib": (JOBLIB_MIN_VERSION, "install"),
"threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"),
"cython": (CYTHON_MIN_VERSION, "build"),
"meson-python": ("0.16.0", "build"),
"matplotlib": ("3.5.0", "benchmark, docs, examples, tests"),
"scikit-image": ("0.19.0", "docs, examples, tests"),
"pandas": ("1.4.0", "benchmark, docs, examples, tests"),
"seaborn": ("0.9.0", "docs, examples"),
"memory_profiler": ("0.57.0", "benchmark, docs"),
"pytest": (PYTEST_MIN_VERSION, "tests"),
"pytest-cov": ("2.9.0", "tests"),
"ruff": ("0.11.0", "tests"),
"black": ("24.3.0", "tests"),
"mypy": ("1.15", "tests"),
"pyamg": ("4.2.1", "tests"),
"polars": ("0.20.30", "docs, tests"),
"pyarrow": ("12.0.0", "tests"),
"sphinx": ("7.3.7", "docs"),
"sphinx-copybutton": ("0.5.2", "docs"),
"sphinx-gallery": ("0.17.1", "docs"),
"numpydoc": ("1.2.0", "docs, tests"),
"Pillow": ("8.4.0", "docs"),
"pooch": ("1.6.0", "docs, examples, tests"),
"sphinx-prompt": ("1.4.0", "docs"),
"sphinxext-opengraph": ("0.9.1", "docs"),
"plotly": ("5.14.0", "docs, examples"),
"sphinxcontrib-sass": ("0.3.4", "docs"),
"sphinx-remove-toctrees": ("1.0.0.post1", "docs"),
"sphinx-design": ("0.6.0", "docs"),
"pydata-sphinx-theme": ("0.15.3", "docs"),
"towncrier": ("24.8.0", "docs"),
# XXX: Pin conda-lock to the latest released version (needs manual update
# from time to time)
"conda-lock": ("2.5.7", "maintenance"),
}
# create inverse mapping for setuptools
tag_to_packages: dict = defaultdict(list)
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(", "):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
# Used by CI to get the min dependencies
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get min dependencies for a package")
parser.add_argument("package", choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
|
"""Unittests for langchain.agents.chat package."""
from langchain_core.agents import AgentAction
from langchain.agents.chat.output_parser import ChatOutputParser
output_parser = ChatOutputParser()
def get_action_and_input(text: str) -> tuple[str, str]:
output = output_parser.parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
return "Final Answer", output.return_values["output"]
def test_parse_with_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_without_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
|
"""Unittests for langchain.agents.chat package."""
from langchain_core.agents import AgentAction
from langchain.agents.chat.output_parser import ChatOutputParser
output_parser = ChatOutputParser()
def get_action_and_input(text: str) -> tuple[str, str]:
output = output_parser.parse(text)
if isinstance(output, AgentAction):
return output.tool, str(output.tool_input)
else:
return "Final Answer", output.return_values["output"]
def test_parse_with_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```json
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
def test_parse_without_language() -> None:
llm_output = """I can use the `foo` tool to achieve the goal.
Action:
```
{
"action": "foo",
"action_input": "bar"
}
```
"""
action, action_input = get_action_and_input(llm_output)
assert action == "foo"
assert action_input == "bar"
|
from typing import Any
def _resolve_schema_references(schema: Any, definitions: dict[str, Any]) -> Any:
"""
Resolve the $ref keys in a JSON schema object using the provided definitions.
"""
if isinstance(schema, list):
for i, item in enumerate(schema):
schema[i] = _resolve_schema_references(item, definitions)
elif isinstance(schema, dict):
if "$ref" in schema:
ref_key = schema.pop("$ref").split("/")[-1]
ref = definitions.get(ref_key, {})
schema.update(ref)
else:
for key, value in schema.items():
schema[key] = _resolve_schema_references(value, definitions)
return schema
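# Hedged example of the resolver above (schema and definitions are illustrative):
#   schema = {"type": "object", "properties": {"pet": {"$ref": "#/definitions/Pet"}}}
#   definitions = {"Pet": {"type": "string", "enum": ["cat", "dog"]}}
#   _resolve_schema_references(schema, definitions)
#   # -> {"type": "object",
#   #     "properties": {"pet": {"type": "string", "enum": ["cat", "dog"]}}}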
def _convert_schema(schema: dict) -> dict:
props = {k: {"title": k, **v} for k, v in schema["properties"].items()}
return {
"type": "object",
"properties": props,
"required": schema.get("required", []),
}
def get_llm_kwargs(function: dict) -> dict:
"""Return the kwargs for the LLMChain constructor.
Args:
function: The function to use.
Returns:
The kwargs for the LLMChain constructor.
"""
return {"functions": [function], "function_call": {"name": function["name"]}}
|
from typing import Any, Dict
def _resolve_schema_references(schema: Any, definitions: Dict[str, Any]) -> Any:
"""
Resolve the $ref keys in a JSON schema object using the provided definitions.
"""
if isinstance(schema, list):
for i, item in enumerate(schema):
schema[i] = _resolve_schema_references(item, definitions)
elif isinstance(schema, dict):
if "$ref" in schema:
ref_key = schema.pop("$ref").split("/")[-1]
ref = definitions.get(ref_key, {})
schema.update(ref)
else:
for key, value in schema.items():
schema[key] = _resolve_schema_references(value, definitions)
return schema
def _convert_schema(schema: dict) -> dict:
props = {k: {"title": k, **v} for k, v in schema["properties"].items()}
return {
"type": "object",
"properties": props,
"required": schema.get("required", []),
}
def get_llm_kwargs(function: dict) -> dict:
"""Return the kwargs for the LLMChain constructor.
Args:
function: The function to use.
Returns:
The kwargs for the LLMChain constructor.
"""
return {"functions": [function], "function_call": {"name": function["name"]}}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINTransfer
from langchain_community.tools.ainetwork.transfer import TransferSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TransferSchema": "langchain_community.tools.ainetwork.transfer",
"AINTransfer": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"AINTransfer",
"TransferSchema",
]
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import AINTransfer
from langchain_community.tools.ainetwork.transfer import TransferSchema
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"TransferSchema": "langchain_community.tools.ainetwork.transfer",
"AINTransfer": "langchain_community.tools",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"TransferSchema",
"AINTransfer",
]
|
import os
import pytest
from jina.orchestrate.deployments import Deployment
@pytest.fixture()
def cuda_total_devices(request):
old_cuda_total_devices = os.environ.get('CUDA_TOTAL_DEVICES', None)
os.environ['CUDA_TOTAL_DEVICES'] = str(request.param)
yield
if old_cuda_total_devices is not None:
os.environ['CUDA_TOTAL_DEVICES'] = old_cuda_total_devices
else:
os.unsetenv('CUDA_TOTAL_DEVICES')
@pytest.mark.parametrize(
'device_str, replicas, expected, cuda_total_devices',
[
        ['1', 1, None, 3], # won't trigger device round-robin
        ['1', 2, None, 3], # won't trigger device round-robin
        ['1,2', 2, None, 3], # won't trigger device round-robin
['RR', 2, {0: 0, 1: 1}, 3],
['RR', 5, {0: 0, 1: 1, 2: 2, 3: 0, 4: 1}, 3],
['RR1:', 5, {0: 1, 1: 2, 2: 1, 3: 2, 4: 1}, 3],
['RR0:2', 5, {0: 0, 1: 1, 2: 0, 3: 1, 4: 0}, 3],
['RR1:2', 2, {0: 1, 1: 1}, 3],
['RR1:2', 1, {0: 1}, 3],
['RR0,2,3', 3, {0: 0, 1: 2, 2: 3}, 4],
['RR0,2,3', 5, {0: 0, 1: 2, 2: 3, 3: 0, 4: 2}, 4],
], indirect=['cuda_total_devices']
)
def test_cuda_assignment(device_str, replicas, expected, cuda_total_devices):
actual = Deployment._roundrobin_cuda_device(device_str, replicas)
assert actual == expected
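# Reading of the device strings exercised above: 'RR' round-robins replicas over
# all CUDA_TOTAL_DEVICES, 'RR<start>:<end>' rotates over a half-open device range,
# and 'RR<i>,<j>,...' rotates over an explicit device list, while plain strings
# such as '1' or '1,2' pin devices directly, so _roundrobin_cuda_device returns
# None and no mapping is built.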
|
import os
import pytest
from jina.orchestrate.deployments import Deployment
@pytest.mark.parametrize(
'device_str, replicas, expected',
[
        ['1', 1, None], # won't trigger device round-robin
        ['1', 2, None], # won't trigger device round-robin
        ['1,2', 2, None], # won't trigger device round-robin
['RR', 2, {0: 0, 1: 1}],
['RR', 5, {0: 0, 1: 1, 2: 2, 3: 0, 4: 1}],
['RR1:', 5, {0: 1, 1: 2, 2: 1, 3: 2, 4: 1}],
['RR0:2', 5, {0: 0, 1: 1, 2: 0, 3: 1, 4: 0}],
['RR1:2', 2, {0: 1, 1: 1}],
['RR1:2', 1, {0: 1}],
],
)
def test_cuda_assignment(device_str, replicas, expected):
os.environ['CUDA_TOTAL_DEVICES'] = str(3)
actual = Deployment._roundrobin_cuda_device(device_str, replicas)
assert actual == expected
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
|
import pytest
@pytest.mark.compile
def test_placeholder() -> None:
"""Used for compiling integration tests without running any real tests."""
pass
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""[BETA] Set the return type of torch operations on datapoints.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
Can be used as a global flag for the entire program:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("datapoints")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("datapoints"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "datapoint" or "tensor". Default is "tensor".
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "datapoint": True}[return_type.lower()]
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
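# Hedged illustration of the force-subclass set above: even in the default
# "tensor" mode, ``img.clone()`` or ``img.to("cpu")`` on a datapoint keeps its
# subclass, while ``img + 2`` decays to a plain ``torch.Tensor`` unless
# ``set_return_type("datapoint")`` is in effect.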
|
import torch
_TORCHFUNCTION_SUBCLASS = False
class _ReturnTypeCM:
def __init__(self, to_restore):
self.to_restore = to_restore
def __enter__(self):
return self
def __exit__(self, *args):
global _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = self.to_restore
def set_return_type(return_type: str):
"""Set the return type of torch operations on datapoints.
This only affects the behaviour of torch operations. It has no effect on
``torchvision`` transforms or functionals, which will always return as
output the same type that was passed as input.
Can be used as a global flag for the entire program:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor (default behaviour)
set_return_type("datapoints")
img + 2 # This is an Image
or as a context manager to restrict the scope:
.. code:: python
img = datapoints.Image(torch.rand(3, 5, 5))
img + 2 # This is a pure Tensor
with set_return_type("datapoints"):
img + 2 # This is an Image
img + 2 # This is a pure Tensor
Args:
return_type (str): Can be "datapoint" or "tensor". Default is "tensor".
"""
global _TORCHFUNCTION_SUBCLASS
to_restore = _TORCHFUNCTION_SUBCLASS
_TORCHFUNCTION_SUBCLASS = {"tensor": False, "datapoint": True}[return_type.lower()]
return _ReturnTypeCM(to_restore)
def _must_return_subclass():
return _TORCHFUNCTION_SUBCLASS
# For those ops we always want to preserve the original subclass instead of returning a pure Tensor
_FORCE_TORCHFUNCTION_SUBCLASS = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach, torch.Tensor.requires_grad_}
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
super().__post_init__()
if self.generator is None:
raise ValueError("generator must be specified")
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
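# Hedged usage sketch (this builder backs ``Dataset.from_generator``); config
# kwargs flow into GeneratorConfig via BUILDER_CONFIG_CLASS:
# def gen():
#     for i in range(3):
#         yield {"id": i, "text": f"example {i}"}
# builder = Generator(generator=gen)
# builder.download_and_prepare()
# ds = builder.as_dataset(split="train")  # 3 rows, features inferred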
|
from dataclasses import dataclass
from typing import Callable, Optional
import datasets
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
generator: Optional[Callable] = None
gen_kwargs: Optional[dict] = None
features: Optional[datasets.Features] = None
def __post_init__(self):
assert self.generator is not None, "generator must be specified"
if self.gen_kwargs is None:
self.gen_kwargs = {}
class Generator(datasets.GeneratorBasedBuilder):
BUILDER_CONFIG_CLASS = GeneratorConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]
def _generate_examples(self, **gen_kwargs):
for idx, ex in enumerate(self.config.generator(**gen_kwargs)):
yield idx, ex
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.15.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.9.10",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
"opencv-python": "opencv-python",
}
|
# THIS FILE HAS BEEN AUTOGENERATED. To update:
# 1. modify the `_deps` dict in setup.py
# 2. run `make deps_table_update`
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.31.0",
"compel": "compel==0.1.8",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.27.0",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark>=0.2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.4.1",
"jaxlib": "jaxlib>=0.4.1",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"parameterized": "parameterized",
"peft": "peft>=0.6.0",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ruff": "ruff==0.9.10",
"safetensors": "safetensors>=0.3.1",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"GitPython": "GitPython<3.1.19",
"scipy": "scipy",
"onnx": "onnx",
"optimum_quanto": "optimum_quanto>=0.2.6",
"gguf": "gguf>=0.10.0",
"torchao": "torchao>=0.7.0",
"bitsandbytes": "bitsandbytes>=0.43.3",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"tiktoken": "tiktoken>=0.7.0",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.41.2",
"urllib3": "urllib3<=2.0.0",
"black": "black",
"phonemizer": "phonemizer",
"opencv-python": "opencv-python",
}
|
import os
import numpy as np
import pytest
from docarray import BaseDoc, DocArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDoc):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocArray[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocArray[MyDoc]()
da_generator = DocArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
import os
import numpy as np
import pytest
from docarray import BaseDocument, DocumentArray
from docarray.documents import ImageDoc
from docarray.typing import NdArray
class MyDoc(BaseDocument):
embedding: NdArray
text: str
image: ImageDoc
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc](
[
MyDoc(
embedding=[1, 2, 3, 4, 5], text='hello', image=ImageDoc(url='aux.png')
),
MyDoc(embedding=[5, 4, 3, 2, 1], text='hello world', image=ImageDoc()),
]
)
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
assert len(da2) == 2
assert len(da) == len(da2)
for d1, d2 in zip(da, da2):
assert d1.embedding.tolist() == d2.embedding.tolist()
assert d1.text == d2.text
assert d1.image.url == d2.image.url
assert da[1].image.url is None
assert da2[1].image.url is None
@pytest.mark.slow
@pytest.mark.parametrize(
'protocol', ['pickle-array', 'protobuf-array', 'protobuf', 'pickle']
)
@pytest.mark.parametrize('compress', ['lz4', 'bz2', 'lzma', 'zlib', 'gzip', None])
@pytest.mark.parametrize('show_progress', [False, True])
def test_array_save_load_binary_streaming(protocol, compress, tmp_path, show_progress):
tmp_file = os.path.join(tmp_path, 'test')
da = DocumentArray[MyDoc]()
def _extend_da(num_docs=100):
for _ in range(num_docs):
da.extend(
[
MyDoc(
embedding=np.random.rand(3, 2),
text='hello',
image=ImageDoc(url='aux.png'),
),
]
)
_extend_da()
da.save_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
da2 = DocumentArray[MyDoc]()
da_generator = DocumentArray[MyDoc].load_binary(
tmp_file, protocol=protocol, compress=compress, show_progress=show_progress
)
for i, doc in enumerate(da_generator):
assert doc.id == da[i].id
assert doc.text == da[i].text
assert doc.image.url == da[i].image.url
da2.append(doc)
assert len(da2) == 100
|
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class ExplicitImageInput(BaseModel):
query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiExplicitImageTool(EdenaiTool):
"""Tool that queries the Eden AI Explicit image detection.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/image_explicit_content_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_image_explicit_content_detection"
description: str = (
"A wrapper around edenai Services Explicit image detection. "
"""Useful for when you have to extract Explicit Content from images.
it detects adult only content in images,
that is generally inappropriate for people under
the age of 18 and includes nudity, sexual activity,
pornography, violence, gore content, etc."""
"Input should be the string url of the image ."
)
args_schema: Type[BaseModel] = ExplicitImageInput
combine_available: bool = True
feature: str = "image"
subfeature: str = "explicit_content"
def _parse_json(self, json_data: dict) -> str:
result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
for idx, found_obj in enumerate(json_data["items"]):
label = found_obj["label"].lower()
likelihood = found_obj["likelihood"]
result_str += f"{idx}: {label} likelihood {likelihood},\n"
return result_str[:-2]
    def _parse_response(self, json_data: list) -> str:
        if len(json_data) == 1:
            result = self._parse_json(json_data[0])
        else:
            result = ""  # avoid NameError when no "eden-ai" entry is present
            for entry in json_data:
                if entry.get("provider") == "eden-ai":
                    result = self._parse_json(entry)
        return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
|
from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
class ExplicitImageInput(BaseModel):
query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiExplicitImageTool(EdenaiTool): # type: ignore[override, override, override]
"""Tool that queries the Eden AI Explicit image detection.
    For API reference, check the Eden AI documentation:
https://docs.edenai.co/reference/image_explicit_content_create.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
"""
name: str = "edenai_image_explicit_content_detection"
description: str = (
"A wrapper around edenai Services Explicit image detection. "
"""Useful for when you have to extract Explicit Content from images.
it detects adult only content in images,
that is generally inappropriate for people under
the age of 18 and includes nudity, sexual activity,
pornography, violence, gore content, etc."""
"Input should be the string url of the image ."
)
args_schema: Type[BaseModel] = ExplicitImageInput
combine_available: bool = True
feature: str = "image"
subfeature: str = "explicit_content"
def _parse_json(self, json_data: dict) -> str:
result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
for idx, found_obj in enumerate(json_data["items"]):
label = found_obj["label"].lower()
likelihood = found_obj["likelihood"]
result_str += f"{idx}: {label} likelihood {likelihood},\n"
return result_str[:-2]
    def _parse_response(self, json_data: list) -> str:
        if len(json_data) == 1:
            result = self._parse_json(json_data[0])
        else:
            result = ""  # avoid NameError when no "eden-ai" entry is present
            for entry in json_data:
                if entry.get("provider") == "eden-ai":
                    result = self._parse_json(entry)
        return result
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
query_params = {"file_url": query, "attributes_as_list": False}
return self._call_eden_ai(query_params)
|
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from ..models.attention import FeedForward, LuminaFeedForward
from ..models.attention_processor import Attention, MochiAttention
_ATTENTION_CLASSES = (Attention, MochiAttention)
_FEEDFORWARD_CLASSES = (FeedForward, LuminaFeedForward)
_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "single_transformer_blocks", "layers")
_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("temporal_transformer_blocks",)
_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "layers")
_ALL_TRANSFORMER_BLOCK_IDENTIFIERS = tuple(
{
*_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS,
*_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS,
*_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS,
}
)
def _get_submodule_from_fqn(module: torch.nn.Module, fqn: str) -> Optional[torch.nn.Module]:
for submodule_name, submodule in module.named_modules():
if submodule_name == fqn:
return submodule
return None
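# Example of the FQN lookup above (model and names are illustrative):
#   import torch.nn as nn
#   model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
#   _get_submodule_from_fqn(model, "0")        # -> the Linear module
#   _get_submodule_from_fqn(model, "missing")  # -> None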
|
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.attention_processor import Attention, MochiAttention
_ATTENTION_CLASSES = (Attention, MochiAttention)
_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "single_transformer_blocks", "layers")
_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS = ("temporal_transformer_blocks",)
_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "layers")
_ALL_TRANSFORMER_BLOCK_IDENTIFIERS = tuple(
{
*_SPATIAL_TRANSFORMER_BLOCK_IDENTIFIERS,
*_TEMPORAL_TRANSFORMER_BLOCK_IDENTIFIERS,
*_CROSS_TRANSFORMER_BLOCK_IDENTIFIERS,
}
)
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Docker Compose')
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
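# Usage sketch (hypothetical paths; `args` mimics the argparse namespace the
# Jina CLI would pass in):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(config_path='flow.yml', outpath='./k8s', k8s_namespace='default')
#     export_kubernetes(args)  # writes Kubernetes YAML under ./k8s, one folder per deployment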
|
import json
from jina.orchestrate.flow.base import Flow
from jina.orchestrate.deployments import Deployment
from jina.jaml import JAML
from jina.logging.predefined import default_logger
from jina.schemas import get_full_schema
from jina_cli.export import api_to_dict
def export_kubernetes(args):
"""Export to k8s yaml files
:param args: args from CLI
"""
from jina.jaml import JAMLCompatible
obj = JAMLCompatible.load_config(args.config_path)
if isinstance(obj, (Flow, Deployment)):
obj.to_kubernetes_yaml(
output_base_path=args.outpath, k8s_namespace=args.k8s_namespace
)
else:
raise NotImplementedError(f'Object of class {obj.__class__.__name__} cannot be exported to Kubernetes')
def export_docker_compose(args):
"""Export to Docker compose yaml files
:param args: args from CLI
"""
Flow.load_config(args.config_path).to_docker_compose_yaml(
output_path=args.outpath, network_name=args.network_name
)
def export_flowchart(args):
"""Export to flowchart file
:param args: args from CLI
"""
Flow.load_config(args.config_path).plot(
args.outpath, vertical_layout=args.vertical_layout
)
def export_schema(args):
"""Export to JSON Schemas
:param args: args from CLI
"""
from jina import __version__
if args.yaml_path:
dump_api = api_to_dict()
for yp in args.yaml_path:
f_name = (yp % __version__) if '%s' in yp else yp
with open(f_name, 'w', encoding='utf8') as fp:
JAML.dump(dump_api, fp)
default_logger.info(f'API is exported to {f_name}')
if args.json_path:
dump_api = api_to_dict()
for jp in args.json_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
if args.schema_path:
dump_api = get_full_schema()
for jp in args.schema_path:
f_name = (jp % __version__) if '%s' in jp else jp
with open(f_name, 'w', encoding='utf8') as fp:
json.dump(dump_api, fp, sort_keys=True)
default_logger.info(f'API is exported to {f_name}')
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CTC
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.legacy.losses import Reduction
from keras.src.losses.losses import kl_divergence as KLD
from keras.src.losses.losses import kl_divergence as kld
from keras.src.losses.losses import kl_divergence as kullback_leibler_divergence
from keras.src.losses.losses import log_cosh as logcosh
from keras.src.losses.losses import mean_absolute_error as MAE
from keras.src.losses.losses import mean_absolute_error as mae
from keras.src.losses.losses import mean_absolute_percentage_error as MAPE
from keras.src.losses.losses import mean_absolute_percentage_error as mape
from keras.src.losses.losses import mean_squared_error as MSE
from keras.src.losses.losses import mean_squared_error as mse
from keras.src.losses.losses import mean_squared_logarithmic_error as MSLE
from keras.src.losses.losses import mean_squared_logarithmic_error as msle
|
"""DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize
from keras.src.losses import get
from keras.src.losses import serialize
from keras.src.losses.loss import Loss
from keras.src.losses.losses import CTC
from keras.src.losses.losses import BinaryCrossentropy
from keras.src.losses.losses import BinaryFocalCrossentropy
from keras.src.losses.losses import CategoricalCrossentropy
from keras.src.losses.losses import CategoricalFocalCrossentropy
from keras.src.losses.losses import CategoricalHinge
from keras.src.losses.losses import CosineSimilarity
from keras.src.losses.losses import Dice
from keras.src.losses.losses import Hinge
from keras.src.losses.losses import Huber
from keras.src.losses.losses import KLDivergence
from keras.src.losses.losses import LogCosh
from keras.src.losses.losses import MeanAbsoluteError
from keras.src.losses.losses import MeanAbsolutePercentageError
from keras.src.losses.losses import MeanSquaredError
from keras.src.losses.losses import MeanSquaredLogarithmicError
from keras.src.losses.losses import Poisson
from keras.src.losses.losses import SparseCategoricalCrossentropy
from keras.src.losses.losses import SquaredHinge
from keras.src.losses.losses import Tversky
from keras.src.losses.losses import binary_crossentropy
from keras.src.losses.losses import binary_focal_crossentropy
from keras.src.losses.losses import categorical_crossentropy
from keras.src.losses.losses import categorical_focal_crossentropy
from keras.src.losses.losses import categorical_hinge
from keras.src.losses.losses import cosine_similarity
from keras.src.losses.losses import ctc
from keras.src.losses.losses import dice
from keras.src.losses.losses import hinge
from keras.src.losses.losses import huber
from keras.src.losses.losses import kl_divergence
from keras.src.losses.losses import log_cosh
from keras.src.losses.losses import mean_absolute_error
from keras.src.losses.losses import mean_absolute_percentage_error
from keras.src.losses.losses import mean_squared_error
from keras.src.losses.losses import mean_squared_logarithmic_error
from keras.src.losses.losses import poisson
from keras.src.losses.losses import sparse_categorical_crossentropy
from keras.src.losses.losses import squared_hinge
from keras.src.losses.losses import tversky
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate a model's capability on optical
    character recognition. This dataset was generated by rendering sentences from the Stanford Sentiment
    Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
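# Usage sketch (assumes network access for the initial download when
# download=True; the archive is extracted under `root`):
#
#     ds = RenderedSST2(root='data', split='val', download=True)
#     img, label = ds[0]                   # RGB PIL image, int label
#     print(len(ds), ds.classes[label])    # 872 images in the val split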
|
from pathlib import Path
from typing import Any, Callable, Optional, Tuple
import PIL.Image
from .folder import make_dataset
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class RenderedSST2(VisionDataset):
"""`The Rendered SST2 Dataset <https://github.com/openai/CLIP/blob/main/data/rendered-sst2.md>`_.
    Rendered SST2 is an image classification dataset used to evaluate a model's capability on optical
    character recognition. This dataset was generated by rendering sentences from the Stanford Sentiment
    Treebank v2 dataset.
This dataset contains two classes (positive and negative) and is divided in three splits: a train
split containing 6920 images (3610 positive and 3310 negative), a validation split containing 872 images
(444 positive and 428 negative), and a test split containing 1821 images (909 positive and 912 negative).
Args:
root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"`` and ``"test"``.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version. E.g., ``transforms.RandomCrop``.
target_transform (callable, optional): A function/transform that takes in the target and transforms it.
download (bool, optional): If True, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again. Default is False.
"""
_URL = "https://openaipublic.azureedge.net/clip/data/rendered-sst2.tgz"
_MD5 = "2384d08e9dcfa4bd55b324e610496ee5"
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super().__init__(root, transform=transform, target_transform=target_transform)
self._split = verify_str_arg(split, "split", ("train", "val", "test"))
self._split_to_folder = {"train": "train", "val": "valid", "test": "test"}
self._base_folder = Path(self.root) / "rendered-sst2"
self.classes = ["negative", "positive"]
self.class_to_idx = {"negative": 0, "positive": 1}
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
self._samples = make_dataset(str(self._base_folder / self._split_to_folder[self._split]), extensions=("png",))
def __len__(self) -> int:
return len(self._samples)
def __getitem__(self, idx: int) -> Tuple[Any, Any]:
image_file, label = self._samples[idx]
image = PIL.Image.open(image_file).convert("RGB")
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
def extra_repr(self) -> str:
return f"split={self._split}"
def _check_exists(self) -> bool:
for class_label in set(self.classes):
if not (self._base_folder / self._split_to_folder[self._split] / class_label).is_dir():
return False
return True
def _download(self) -> None:
if self._check_exists():
return
download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5)
|
"""Test MistralAI Embedding."""
from langchain_mistralai import MistralAIEmbeddings
def test_mistralai_embedding_documents() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar", "test document"]
embedding = MistralAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 1024
def test_mistralai_embedding_query() -> None:
"""Test MistralAI embeddings for query."""
document = "foo bar"
embedding = MistralAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1024
async def test_mistralai_embedding_documents_async() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar", "test document"]
embedding = MistralAIEmbeddings()
output = await embedding.aembed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 1024
async def test_mistralai_embedding_query_async() -> None:
"""Test MistralAI embeddings for query."""
document = "foo bar"
embedding = MistralAIEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1024
def test_mistralai_embedding_documents_long() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar " * 1000, "test document " * 1000] * 5
embedding = MistralAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 10
assert len(output[0]) == 1024
def test_mistralai_embed_query_character() -> None:
"""Test MistralAI embeddings for query."""
document = "😳"
embedding = MistralAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1024
|
"""Test MistralAI Embedding"""
from langchain_mistralai import MistralAIEmbeddings
def test_mistralai_embedding_documents() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar", "test document"]
embedding = MistralAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 1024
def test_mistralai_embedding_query() -> None:
"""Test MistralAI embeddings for query."""
document = "foo bar"
embedding = MistralAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1024
async def test_mistralai_embedding_documents_async() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar", "test document"]
embedding = MistralAIEmbeddings()
output = await embedding.aembed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 1024
async def test_mistralai_embedding_query_async() -> None:
"""Test MistralAI embeddings for query."""
document = "foo bar"
embedding = MistralAIEmbeddings()
output = await embedding.aembed_query(document)
assert len(output) == 1024
def test_mistralai_embedding_documents_long() -> None:
"""Test MistralAI embeddings for documents."""
documents = ["foo bar " * 1000, "test document " * 1000] * 5
embedding = MistralAIEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 10
assert len(output[0]) == 1024
def test_mistralai_embed_query_character() -> None:
"""Test MistralAI embeddings for query."""
document = "😳"
embedding = MistralAIEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 1024
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
            # TODO: Not yet adapted for sparse models; might want it?
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
from __future__ import annotations
import torch.nn as nn
from sentence_transformers.losses.CosineSimilarityLoss import CosineSimilarityLoss
from sentence_transformers.sparse_encoder.SparseEncoder import SparseEncoder
class SparseCosineSimilarityLoss(CosineSimilarityLoss):
def __init__(
self,
model: SparseEncoder,
loss_fct: nn.Module = nn.MSELoss(),
cos_score_transformation: nn.Module = nn.Identity(),
) -> None:
"""
        SparseCosineSimilarityLoss expects that the InputExamples consist of two texts and a float label. It computes the
vectors ``u = model(sentence_A)`` and ``v = model(sentence_B)`` and measures the cosine-similarity between the two.
By default, it minimizes the following loss: ``||input_label - cos_score_transformation(cosine_sim(u,v))||_2``.
Args:
model: SparseEncoder model
loss_fct: Which pytorch loss function should be used to
compare the ``cosine_similarity(u, v)`` with the
input_label? By default, MSE is used: ``||input_label -
cosine_sim(u, v)||_2``
cos_score_transformation: The cos_score_transformation
function is applied on top of cosine_similarity. By
                default, the identity function is used (i.e. no change).
References:
            # TODO: Not yet adapted for sparse models; might want it?
- `Training Examples > Semantic Textual Similarity <../../../examples/sentence_transformer/training/sts/README.html>`_
Requirements:
1. Sentence pairs with corresponding similarity scores in range `[0, 1]`
Inputs:
+--------------------------------+------------------------+
| Texts | Labels |
+================================+========================+
| (sentence_A, sentence_B) pairs | float similarity score |
+--------------------------------+------------------------+
Relations:
- :class:`SparseCoSENTLoss` seems to produce a stronger training signal than CosineSimilarityLoss. In our experiments, SparseCoSENTLoss is recommended.
- :class:`SparseAnglELoss` is :class:`SparseCoSENTLoss` with ``pairwise_angle_sim`` as the metric, rather than ``pairwise_cos_sim``. It also produces a stronger training signal than SparseCosineSimilarityLoss.
Example:
::
from datasets import Dataset
from sentence_transformers.sparse_encoder import SparseEncoder, SparseEncoderTrainer, losses
model = SparseEncoder("distilbert/distilbert-base-uncased")
train_dataset = Dataset.from_dict(
{
"sentence1": ["It's nice weather outside today.", "He drove to work."],
"sentence2": ["It's so sunny.", "She walked to the store."],
"score": [1.0, 0.3],
}
)
loss = losses.SparseCosineSimilarityLoss(model)
trainer = SparseEncoderTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
"""
        super().__init__(model, loss_fct=loss_fct, cos_score_transformation=cos_score_transformation)
|
from __future__ import annotations
from typing import Any
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
""":class:`torch.Tensor` subclass for videos with shape ``[..., T, C, H, W]``.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: torch.dtype | None = None,
device: torch.device | str | int | None = None,
requires_grad: bool | None = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected the video tensor to have at least 4 dimensions, got {tensor.ndim}")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
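# Usage sketch (this module uses relative imports, so it is shown as a
# comment; run from within the torchvision package):
#
#     import torch
#     v = Video(torch.rand(8, 3, 32, 32))   # T=8 frames of 3x32x32
#     assert isinstance(v, Video) and v.ndim == 4
#     Video(torch.rand(3, 32, 32))          # raises ValueError: fewer than 4 dims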
|
from __future__ import annotations
from typing import Any, Optional, Union
import torch
from ._tv_tensor import TVTensor
class Video(TVTensor):
""":class:`torch.Tensor` subclass for videos with shape ``[..., T, C, H, W]``.
Args:
data (tensor-like): Any data that can be turned into a tensor with :func:`torch.as_tensor`.
dtype (torch.dtype, optional): Desired data type. If omitted, will be inferred from
``data``.
device (torch.device, optional): Desired device. If omitted and ``data`` is a
:class:`torch.Tensor`, the device is taken from it. Otherwise, the video is constructed on the CPU.
requires_grad (bool, optional): Whether autograd should record operations. If omitted and
``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
"""
def __new__(
cls,
data: Any,
*,
dtype: Optional[torch.dtype] = None,
device: Optional[Union[torch.device, str, int]] = None,
requires_grad: Optional[bool] = None,
) -> Video:
tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        if tensor.ndim < 4:
            raise ValueError(f"Expected the video tensor to have at least 4 dimensions, got {tensor.ndim}")
return tensor.as_subclass(cls)
def __repr__(self, *, tensor_contents: Any = None) -> str: # type: ignore[override]
return self._make_repr()
|
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
MODELS = Registry('models', parent=MMCV_MODELS)
BACKBONES = MODELS
NECKS = MODELS
ROI_EXTRACTORS = MODELS
SHARED_HEADS = MODELS
HEADS = MODELS
LOSSES = MODELS
DETECTORS = MODELS
def build_backbone(cfg):
"""Build backbone."""
return BACKBONES.build(cfg)
def build_neck(cfg):
"""Build neck."""
return NECKS.build(cfg)
def build_roi_extractor(cfg):
"""Build roi extractor."""
return ROI_EXTRACTORS.build(cfg)
def build_shared_head(cfg):
"""Build shared head."""
return SHARED_HEADS.build(cfg)
def build_head(cfg):
"""Build head."""
return HEADS.build(cfg)
def build_loss(cfg):
"""Build loss."""
return LOSSES.build(cfg)
def build_detector(cfg, train_cfg=None, test_cfg=None):
"""Build detector."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
            'train_cfg and test_cfg are deprecated, '
'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return DETECTORS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
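# Usage sketch (hypothetical minimal cfg; real configs are loaded from mmcv
# Config files and carry many more fields):
#
#     cfg = dict(
#         type='FCOS',
#         backbone=dict(type='ResNet', depth=50),
#         neck=dict(type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
#         bbox_head=dict(type='FCOSHead', num_classes=80, in_channels=256),
#     )
#     detector = build_detector(cfg, train_cfg=None, test_cfg=None)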
|
import warnings
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
MODELS = Registry('models', parent=MMCV_MODELS)
BACKBONES = MODELS
NECKS = MODELS
ROI_EXTRACTORS = MODELS
SHARED_HEADS = MODELS
HEADS = MODELS
LOSSES = MODELS
DETECTORS = MODELS
def build_backbone(cfg):
"""Build backbone."""
return BACKBONES.build(cfg)
def build_neck(cfg):
"""Build neck."""
return NECKS.build(cfg)
def build_roi_extractor(cfg):
"""Build roi extractor."""
return ROI_EXTRACTORS.build(cfg)
def build_shared_head(cfg):
"""Build shared head."""
return SHARED_HEADS.build(cfg)
def build_head(cfg):
"""Build head."""
return HEADS.build(cfg)
def build_loss(cfg):
"""Build loss."""
return LOSSES.build(cfg)
def build_detector(cfg, train_cfg=None, test_cfg=None):
"""Build detector."""
if train_cfg is not None or test_cfg is not None:
warnings.warn(
            'train_cfg and test_cfg are deprecated, '
'please specify them in model', UserWarning)
assert cfg.get('train_cfg') is None or train_cfg is None, \
'train_cfg specified in both outer field and model field '
assert cfg.get('test_cfg') is None or test_cfg is None, \
'test_cfg specified in both outer field and model field '
return DETECTORS.build(
cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
    3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is
        supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
        use_scale: If `True`, will create a variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
        return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
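# Usage sketch: additive attention over Tv=8 value steps for Tq=4 query
# steps; shapes below follow the docstring, the random inputs are
# illustrative only.
if __name__ == "__main__":
    import numpy as np

    layer = AdditiveAttention(use_scale=True)
    q = np.random.rand(2, 4, 16).astype("float32")  # (batch, Tq, dim)
    v = np.random.rand(2, 8, 16).astype("float32")  # (batch, Tv, dim)
    out, scores = layer([q, v], return_attention_scores=True)
    assert out.shape == (2, 4, 16) and scores.shape == (2, 4, 8)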
|
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.attention.attention import Attention
@keras_export("keras.layers.AdditiveAttention")
class AdditiveAttention(Attention):
"""Additive attention layer, a.k.a. Bahdanau-style attention.
Inputs are a list with 2 or 3 elements:
1. A `query` tensor of shape `(batch_size, Tq, dim)`.
2. A `value` tensor of shape `(batch_size, Tv, dim)`.
    3. An optional `key` tensor of shape `(batch_size, Tv, dim)`. If none is
        supplied, `value` will be used as `key`.
The calculation follows the steps:
1. Calculate attention scores using `query` and `key` with shape
`(batch_size, Tq, Tv)` as a non-linear sum
`scores = reduce_sum(tanh(query + key), axis=-1)`.
2. Use scores to calculate a softmax distribution with shape
`(batch_size, Tq, Tv)`.
3. Use the softmax distribution to create a linear combination of `value`
with shape `(batch_size, Tq, dim)`.
Args:
        use_scale: If `True`, will create a variable to scale the
attention scores.
dropout: Float between 0 and 1. Fraction of the units to drop for the
attention scores. Defaults to `0.0`.
Call arguments:
inputs: List of the following tensors:
- `query`: Query tensor of shape `(batch_size, Tq, dim)`.
- `value`: Value tensor of shape `(batch_size, Tv, dim)`.
- `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If
not given, will use `value` for both `key` and `value`, which is
the most common case.
mask: List of the following tensors:
- `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.
If given, the output will be zero at the positions where
`mask==False`.
- `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.
If given, will apply the mask such that values at positions
where `mask==False` do not contribute to the result.
        return_attention_scores: bool, if `True`, returns the attention scores
(after masking and softmax) as an additional output argument.
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (no dropout).
use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds
a mask such that position `i` cannot attend to positions `j > i`.
This prevents the flow of information from the future towards the
past. Defaults to `False`.
Output:
Attention outputs of shape `(batch_size, Tq, dim)`.
(Optional) Attention scores after masking and softmax with shape
`(batch_size, Tq, Tv)`.
"""
def __init__(
self,
use_scale=True,
dropout=0.0,
**kwargs,
):
super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)
def build(self, input_shape):
self._validate_inputs(input_shape)
dim = input_shape[0][-1]
self.scale = None
if self.use_scale:
self.scale = self.add_weight(
name="scale",
shape=[dim],
initializer="glorot_uniform",
dtype=self.dtype,
trainable=True,
)
self.built = True
def _calculate_scores(self, query, key):
"""Calculates attention scores as a nonlinear sum of query and key.
Args:
query: Query tensor of shape `(batch_size, Tq, dim)`.
key: Key tensor of shape `(batch_size, Tv, dim)`.
Returns:
Tensor of shape `(batch_size, Tq, Tv)`.
"""
# Reshape tensors to enable broadcasting.
# Reshape into [batch_size, Tq, 1, dim].
q_reshaped = ops.expand_dims(query, axis=-2)
# Reshape into [batch_size, 1, Tv, dim].
k_reshaped = ops.expand_dims(key, axis=-3)
scale = self.scale if self.use_scale else 1.0
return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)
def get_config(self):
base_config = super().get_config()
del base_config["score_mode"]
return base_config
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
preprocess_cfg = dict(
mean=[103.530, 116.280, 123.675],
std=[1.0, 1.0, 1.0],
to_rgb=False,
pad_size_divisor=32)
model = dict(
type='NASFCOS',
preprocess_cfg=preprocess_cfg,
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
# dataset settings
train_dataloader = dict(batch_size=4, num_workers=2)
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
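# Loading sketch (hypothetical file path; mmcv's Config resolves the _base_
# files above and merges this dict on top of them):
#
#     from mmcv import Config
#     cfg = Config.fromfile('configs/nas_fcos/nas_fcos_r50_fpn_1x_coco.py')
#     print(cfg.model.bbox_head.num_classes)  # 80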
|
_base_ = [
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='NASFCOS',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False, eps=0),
style='caffe',
init_cfg=dict(
type='Pretrained',
checkpoint='open-mmlab://detectron2/resnet50_caffe')),
neck=dict(
type='NASFCOS_FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
num_outs=5,
norm_cfg=dict(type='BN'),
conv_cfg=dict(type='DCNv2', deform_groups=2)),
bbox_head=dict(
type='FCOSHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
norm_cfg=dict(type='GN', num_groups=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
|
# coding=utf-8
# Copyright 2024 Descript and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dac model configuration"""
import math
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class DacConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`DacModel`]. It is used to instantiate a
Dac model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[descript/dac_16khz](https://huggingface.co/descript/dac_16khz) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_size (`int`, *optional*, defaults to 64):
Intermediate representation dimension for the encoder.
downsampling_ratios (`list[int]`, *optional*, defaults to `[2, 4, 8, 8]`):
Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder.
decoder_hidden_size (`int`, *optional*, defaults to 1536):
Intermediate representation dimension for the decoder.
n_codebooks (`int`, *optional*, defaults to 9):
Number of codebooks in the VQVAE.
codebook_size (`int`, *optional*, defaults to 1024):
Number of discrete codes in each codebook.
codebook_dim (`int`, *optional*, defaults to 8):
Dimension of the codebook vectors. If not defined, uses `encoder_hidden_size`.
quantizer_dropout (`bool`, *optional*, defaults to 0):
Whether to apply dropout to the quantizer.
commitment_loss_weight (float, *optional*, defaults to 0.25):
Weight of the commitment loss term in the VQVAE loss function.
codebook_loss_weight (float, *optional*, defaults to 1.0):
Weight of the codebook loss term in the VQVAE loss function.
sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
Example:
```python
>>> from transformers import DacModel, DacConfig
>>> # Initializing a "descript/dac_16khz" style configuration
>>> configuration = DacConfig()
>>> # Initializing a model (with random weights) from the "descript/dac_16khz" style configuration
>>> model = DacModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dac"
def __init__(
self,
encoder_hidden_size=64,
downsampling_ratios=[2, 4, 8, 8],
decoder_hidden_size=1536,
n_codebooks=9,
codebook_size=1024,
codebook_dim=8,
quantizer_dropout=0,
commitment_loss_weight=0.25,
codebook_loss_weight=1.0,
sampling_rate=16000,
**kwargs,
):
self.encoder_hidden_size = encoder_hidden_size
self.downsampling_ratios = downsampling_ratios
self.decoder_hidden_size = decoder_hidden_size
self.upsampling_ratios = downsampling_ratios[::-1]
self.n_codebooks = n_codebooks
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim
self.quantizer_dropout = quantizer_dropout
self.sampling_rate = sampling_rate
self.hidden_size = encoder_hidden_size * (2 ** len(downsampling_ratios))
self.hop_length = int(np.prod(downsampling_ratios))
self.commitment_loss_weight = commitment_loss_weight
self.codebook_loss_weight = codebook_loss_weight
super().__init__(**kwargs)
@property
def frame_rate(self) -> int:
hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
__all__ = ["DacConfig"]
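# Worked example: with the default downsampling_ratios [2, 4, 8, 8],
# hop_length = 2 * 4 * 8 * 8 = 512, so frame_rate = ceil(16000 / 512) = 32.
# (Shown as a comment because this module relies on relative imports.)
#
#     cfg = DacConfig()
#     assert cfg.hop_length == 512
#     assert cfg.frame_rate == 32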
|
# coding=utf-8
# Copyright 2024 Descript and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dac model configuration"""
import math
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class DacConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`DacModel`]. It is used to instantiate a
Dac model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the
[descript/dac_16khz](https://huggingface.co/descript/dac_16khz) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
encoder_hidden_size (`int`, *optional*, defaults to 64):
Intermediate representation dimension for the encoder.
downsampling_ratios (`List[int]`, *optional*, defaults to `[2, 4, 8, 8]`):
Ratios for downsampling in the encoder. These are used in reverse order for upsampling in the decoder.
decoder_hidden_size (`int`, *optional*, defaults to 1536):
Intermediate representation dimension for the decoder.
n_codebooks (`int`, *optional*, defaults to 9):
Number of codebooks in the VQVAE.
codebook_size (`int`, *optional*, defaults to 1024):
Number of discrete codes in each codebook.
codebook_dim (`int`, *optional*, defaults to 8):
Dimension of the codebook vectors. If not defined, uses `encoder_hidden_size`.
quantizer_dropout (`bool`, *optional*, defaults to 0):
Whether to apply dropout to the quantizer.
commitment_loss_weight (float, *optional*, defaults to 0.25):
Weight of the commitment loss term in the VQVAE loss function.
codebook_loss_weight (float, *optional*, defaults to 1.0):
Weight of the codebook loss term in the VQVAE loss function.
sampling_rate (`int`, *optional*, defaults to 16000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
Example:
```python
>>> from transformers import DacModel, DacConfig
>>> # Initializing a "descript/dac_16khz" style configuration
>>> configuration = DacConfig()
>>> # Initializing a model (with random weights) from the "descript/dac_16khz" style configuration
>>> model = DacModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dac"
def __init__(
self,
encoder_hidden_size=64,
downsampling_ratios=[2, 4, 8, 8],
decoder_hidden_size=1536,
n_codebooks=9,
codebook_size=1024,
codebook_dim=8,
quantizer_dropout=0,
commitment_loss_weight=0.25,
codebook_loss_weight=1.0,
sampling_rate=16000,
**kwargs,
):
self.encoder_hidden_size = encoder_hidden_size
self.downsampling_ratios = downsampling_ratios
self.decoder_hidden_size = decoder_hidden_size
self.upsampling_ratios = downsampling_ratios[::-1]
self.n_codebooks = n_codebooks
self.codebook_size = codebook_size
self.codebook_dim = codebook_dim
self.quantizer_dropout = quantizer_dropout
self.sampling_rate = sampling_rate
self.hidden_size = encoder_hidden_size * (2 ** len(downsampling_ratios))
self.hop_length = int(np.prod(downsampling_ratios))
self.commitment_loss_weight = commitment_loss_weight
self.codebook_loss_weight = codebook_loss_weight
super().__init__(**kwargs)
@property
def frame_rate(self) -> int:
hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
__all__ = ["DacConfig"]
|
import pytest
from jina import Client, Document, DocumentArray, Flow
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(
docker_image_name, docker_image_built, shards, replicas, port_generator
):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{docker_image_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
|
import os
import time
import pytest
from jina import Client, Document, DocumentArray, Flow
@pytest.mark.parametrize('shards', [1, 2])
@pytest.mark.parametrize('replicas', [1, 3, 4])
def test_containerruntime_args(
docker_image_name, docker_image_built, shards, replicas, port_generator
):
exposed_port = port_generator()
f = Flow(port=exposed_port).add(
name='executor_container',
uses=f'docker://{docker_image_name}',
replicas=replicas,
shards=shards,
polling='ANY',
)
with f:
ret1 = Client(port=exposed_port).index(
inputs=DocumentArray([Document() for _ in range(2000)]),
request_size=10,
return_responses=True,
)
assert len(ret1) == 200
unique_replicas = set()
shard_ids = set()
for r in ret1:
assert len(r.docs) == 10
for replica in r.docs[:, 'tags__replica']:
unique_replicas.add(replica)
for shard_id in r.docs[:, 'tags__shard_id']:
shard_ids.add(shard_id)
for doc in r.docs:
assert doc.tags['shards'] == shards
assert shard_ids == set(range(shards))
assert len(unique_replicas) == replicas * shards
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> list[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
return ["text"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy",
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> list[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs,
config={"callbacks": _run_manager.get_child()},
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
msg = (
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
raise ValueError(msg)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.runnables import Runnable
from pydantic import ConfigDict
from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
logger = logging.getLogger(__name__)
class HypotheticalDocumentEmbedder(Chain, Embeddings):
"""Generate hypothetical document for query, and then embed that.
Based on https://arxiv.org/abs/2212.10496
"""
base_embeddings: Embeddings
llm_chain: Runnable
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Input keys for Hyde's LLM chain."""
return self.llm_chain.input_schema.model_json_schema()["required"]
@property
def output_keys(self) -> list[str]:
"""Output keys for Hyde's LLM chain."""
if isinstance(self.llm_chain, LLMChain):
return self.llm_chain.output_keys
return ["text"]
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Call the base embeddings."""
return self.base_embeddings.embed_documents(texts)
def combine_embeddings(self, embeddings: list[list[float]]) -> list[float]:
"""Combine embeddings into final embeddings."""
try:
import numpy as np
return list(np.array(embeddings).mean(axis=0))
except ImportError:
logger.warning(
"NumPy not found in the current Python environment. "
"HypotheticalDocumentEmbedder will use a pure Python implementation "
"for internal calculations, which may significantly impact "
"performance, especially for large datasets. For optimal speed and "
"efficiency, consider installing NumPy: pip install numpy"
)
if not embeddings:
return []
num_vectors = len(embeddings)
return [sum(dim_values) / num_vectors for dim_values in zip(*embeddings)]
def embed_query(self, text: str) -> list[float]:
"""Generate a hypothetical document and embedded it."""
var_name = self.input_keys[0]
result = self.llm_chain.invoke({var_name: text})
if isinstance(self.llm_chain, LLMChain):
documents = [result[self.output_keys[0]]]
else:
documents = [result]
embeddings = self.embed_documents(documents)
return self.combine_embeddings(embeddings)
def _call(
self,
inputs: dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> dict[str, str]:
"""Call the internal llm chain."""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
return self.llm_chain.invoke(
inputs, config={"callbacks": _run_manager.get_child()}
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
base_embeddings: Embeddings,
prompt_key: Optional[str] = None,
custom_prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> HypotheticalDocumentEmbedder:
"""Load and use LLMChain with either a specific prompt key or custom prompt."""
if custom_prompt is not None:
prompt = custom_prompt
elif prompt_key is not None and prompt_key in PROMPT_MAP:
prompt = PROMPT_MAP[prompt_key]
else:
msg = (
f"Must specify prompt_key if custom_prompt not provided. Should be one "
f"of {list(PROMPT_MAP.keys())}."
)
raise ValueError(msg)
llm_chain = prompt | llm | StrOutputParser()
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
@property
def _chain_type(self) -> str:
return "hyde_chain"
|