| python_code (stringlengths 0-992k) | repo_name (stringlengths 8-46) | file_path (stringlengths 5-162) |
|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
"""
Tools to search sentences in CC similar to sentences in another corpus.
"""
import functools
import logging
import math
import subprocess
from collections import Counter
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple
import func_argparse
import submitit
from kenlm import Model as KenlmModel # type: ignore
from sentence_splitter import SentenceSplitter # type: ignore
from sentencepiece import SentencePieceProcessor # type: ignore
from cc_net import dedup, jsonql, perplexity, text_normalizer
KENLM = Path("./bin/lmplz")
KENLM_BUILD = Path("./bin/build_binary")
VOCAB_SIZE = 2 ** 16 - 10
PROCESSES = 16
def normalize(corpus: Path, output_dir: Path) -> Path:
normalized = output_dir / (corpus.stem + ".normalized")
if normalized.exists():
return normalized
print("Will normalize", corpus, "to", normalized)
jsonql.run_pipes(
jsonql.Mapper(text_normalizer.normalize),
file=corpus,
output=normalized,
processes=PROCESSES,
)
return normalized
# TODO use classic files directory.
def sp_model(lang: str) -> Path:
return Path(f"/checkpoint/guw/cc_clean/lm_sp/{lang}.sp.model")
def _dataset(dataset: Optional[Path], lang: str) -> Path:
return (
dataset
or Path("/datasets01_101/common_crawl/020919") / f"{lang}_head_*.json.gz"
)
class SentencePiece(jsonql.Transformer):
def __init__(self, model: Path):
super().__init__()
self.model = model
self.sp: SentencePieceProcessor = None # type: ignore
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.model))
def do(self, line: str) -> str:
return " ".join(self.sp.encode_as_pieces(line))
class ExtractSentences(jsonql.Transformer):
def __init__(
self,
sp_model: Path,
lm_model: Path,
field: str = "raw_content",
threshold: float = float("+inf"),
):
super().__init__()
self.sp_model = sp_model
self.lm_model = lm_model
self.field = field
self.threshold = threshold
self.sp: SentencePieceProcessor = None
self.lm: KenlmModel = None
self.splitter: SentenceSplitter = None
self.hashes: Set[int] = set()
def _prepare(self):
self.sp = SentencePieceProcessor()
self.sp.load(str(self.sp_model))
self.splitter = SentenceSplitter("en")
self.lm = KenlmModel(str(self.lm_model))
def do(self, document: dict) -> Optional[str]:
content: Optional[str] = document.get(self.field)
if not content:
return None
all_sentences = [
s for l in content.split("\n") if l for s in self.splitter.split(text=l)
]
unique_sentences = []
for s in all_sentences:
if not s:
continue
h = dedup.str_hash(s)
if h in self.hashes:
continue
self.hashes.add(h)
unique_sentences.append(s)
scores = []
for sentence in unique_sentences:
normalized = text_normalizer.normalize(sentence)
pieces = self.sp.encode_as_pieces(normalized)
log_score = self.lm.score(" ".join(pieces))
pp = -1
if len(pieces):
pp = perplexity.pp(log_score, len(pieces))
scores.append(pp)
res = filter(
lambda pp_s: self.threshold > pp_s[0] > 0, zip(scores, unique_sentences)
)
return "\n".join(f"{pp}\t{s}" for (pp, s) in res) or None
def tokenize(corpus: Path, output_dir: Path, lang: str) -> Path:
tokenized = output_dir / (corpus.stem + ".tokenized")
if tokenized.exists():
return tokenized
print("Will SentencePiece", corpus, "to", tokenized)
jsonql.run_pipes(
SentencePiece(sp_model(lang)),
file=normalize(corpus, output_dir),
output=tokenized,
processes=PROCESSES,
)
return tokenized
def train_lm(
corpus: Path,
output_dir: Path,
lang: str = "en",
vocab_size: int = VOCAB_SIZE,
ngrams: int = 5,
):
lm_text_file = output_dir / (corpus.stem + ".arpa")
lm_bin_file = output_dir / (corpus.stem + ".arpa.bin")
if lm_bin_file.exists():
return lm_bin_file
assert KENLM.exists(), f"{KENLM} binary to train kenlm model not found."
normalized = normalize(corpus, output_dir)
tokenized = tokenize(normalized, output_dir, lang)
print("Will train LM", lm_text_file, "on", tokenized)
kenlm_cmd = [
str(KENLM),
f"--order={ngrams}",
"--memory=8G",
f"--temp_prefix={jsonql._tmp_dir()}",
f"--text={tokenized}",
f"--arpa={lm_text_file}",
f"--vocab_estimate={vocab_size}",
"--discount_fallback",
]
subprocess.run(kenlm_cmd, check=True)
print("Will create binary model", lm_bin_file, "from", lm_text_file)
subprocess.run([str(KENLM_BUILD), str(lm_text_file), str(lm_bin_file)], check=True)
return lm_bin_file
def uniform_sampling_wrt_perplexity(
paragraphes: Iterable[str],
rounding: float = 100.0,
cut: float = 1000.0,
samples: int = 20,
) -> Iterable[str]:
max_samples = math.floor(cut / rounding * samples)
n = 0
buckets = Counter([0.0])
logging.info(f"Will sample {max_samples} sentences.")
for lines in paragraphes:
for line in lines.split("\n"):
if not line:
continue
pp = float(line[: line.find("\t")])
pp = math.floor(pp / rounding) * rounding
if pp > cut:
continue
if buckets[pp] > samples:
continue
yield line
buckets[pp] += 1
if buckets[pp] > samples:
logging.info(f"Bucket {pp} is full ({samples} samples, {n} total)")
n += 1
if n > max_samples:
return
def sample(
corpus: Path,
output_dir: Path,
dataset: Path = None,
n: int = 10_000,
lang: str = "en",
) -> Path:
sample_file = output_dir / (corpus.stem + ".pp_sample.tsv")
if sample_file.exists():
return sample_file
dataset = _dataset(dataset, lang)
extractor = ExtractSentences(
sp_model(lang), train_lm(corpus, output_dir), field="raw_content"
)
sampling = functools.partial(
uniform_sampling_wrt_perplexity, rounding=100.0, cut=1000.0, samples=n // 10
)
print(f"Will sample data from {dataset} to {sample_file}")
try:
jsonql.run_pipes(
extractor, sampling, file=dataset, output=sample_file, processes=PROCESSES
)
except Exception:
sample_file.unlink()
raise
subprocess.run(["sort", "-n", "-o", sample_file, sample_file], check=True)
subprocess.run(["head", sample_file], check=True)
return sample_file
def mine(
corpus: Path,
output_dir: Path,
threshold: float,
dataset: Path = None,
lang: str = "en",
) -> List[Path]:
"""Search sentences in CC similar to the one in the given corpus.
Args:
- corpus: corpus to train the LM one. Assumes one sentence per line.
- output_dir: where to store the results
- threshold: maximum perplexity to have
- dataset: glob pattern matching CC shards.
- lang: search in the files of this language
"""
dataset = _dataset(dataset, lang)
files = list(dataset.parent.glob(dataset.name))
outputs = [output_dir / (f.stem + ".tsv") for f in files]
if all(o.exists() for o in outputs):
return outputs
n = len(outputs)
sp = [sp_model(lang)] * n
lm = [train_lm(corpus, output_dir)] * n
thresholds = [threshold] * n
ex = submitit.AutoExecutor(output_dir / "mining_logs")
ex.update_parameters(
name="mine",
cpus_per_task=PROCESSES,
timeout_min=60 * 24 // PROCESSES,
mem_gb=10,
)
jobs = ex.map_array(_mine, files, outputs, sp, lm, thresholds)
print("Submited job array:", jobs[0])
for j in submitit.helpers.as_completed(jobs):
(i, o) = j.result()
print("Mined sentences from", i, "to", o)
return outputs
def _mine(
file: Path, output: Path, sp: Path, lm: Path, threshold: float
) -> Tuple[Path, Path]:
extractor = ExtractSentences(sp, lm, field="raw_content", threshold=threshold)
jsonql.run_pipes(extractor, file=file, output=output, processes=PROCESSES)
return (file, output)
if __name__ == "__main__":
func_argparse.main(sample, mine)
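# Hedged usage sketch (not part of the original module): func_argparse exposes
# `sample` and `mine` as CLI subcommands, so an equivalent direct call looks
# roughly like the helper below. The output-directory handling and the 300.0
# perplexity threshold are illustrative assumptions, not values from this repo.
def _example_mine(corpus: Path, output_dir: Path) -> List[Path]:
    """Illustrative only: mine CC sentences scoring below an arbitrary
    perplexity threshold under an LM trained on `corpus`."""
    output_dir.mkdir(parents=True, exist_ok=True)
    return mine(corpus, output_dir, threshold=300.0)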
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/cc_net/tools/expand_corpus.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
from typing import Iterable, Sequence
from cc_net import dedup, jsonql
from cc_net.dedup import str_hash
from cc_net.flat_hash_set import FlatHashSet
def text(*args: str) -> str:
return "\n".join(args)
def write_docs(file: Path, docs: Iterable[Sequence[str]]):
file.parent.mkdir(exist_ok=True)
with open(file, "w") as f:
for sentences in docs:
doc = dict(text=text(*sentences))
print(json.dumps(doc), file=f)
def as_dict(hash_set):
if not isinstance(hash_set, dict):
hash_set = {k: v for (k, v) in hash_set.items()}
return hash_set
def load_hashes(file):
results = dedup.FlatHashSet()
results.load(file)
return as_dict(results)
LENGTHS = ["original_length", "length"]
def assert_documents_equal(expected, actual, ignoring={}):
expected = [{k: doc[k] for k in doc if k not in ignoring} for doc in expected]
    actual = [{k: doc[k] for k in doc if k not in ignoring} for doc in actual]
assert expected == actual
def test_simple_dedup(tmp_path: Path) -> None:
write_docs(
tmp_path / "docs.json",
[
["_Hello", "_World", "I'm so original"],
["_world", "I'm originaler", "_Hello"],
],
)
results = list(dedup.deduplicate(tmp_path / "docs.json", field="text"))
expected = [
# First document is untouched
dict(
text=text("_Hello", "_World", "I'm so original"),
original_nlines=3,
nlines=3,
line_ids=[0, 1, 2],
),
        # Second document loses several lines
dict(text="I'm originaler", original_nlines=3, nlines=1, line_ids=[1]),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_with_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
collector = dedup.HashesCollector(field="text", output=hashes)
list(collector.map(documents))
results = load_hashes(hashes)
expected = {
str_hash(l): l.startswith("_")
for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
}
assert expected == results
def test_dedup_with_np_dump(tmp_path: Path):
hashes = tmp_path / "hashes.bin"
documents = [
dict(text=text("_Hello", "_World", "I'm so original")),
dict(text=text("_world", "I'm originaler", "_Hello")),
]
with dedup.HashesCollector(field="text", output=hashes) as d:
list(d.map(documents))
results = FlatHashSet()
results.load_np(hashes)
expected = set(
str_hash(l) for l in ["_hello", "_world", "i'm so original", "i'm originaler"]
)
assert expected == set(results.keys())
def test_dedup_from_hashes(tmp_path: Path):
documents = [
dict(text=text("_Hello", "World", "I'm so original")),
dict(text=text("Good morning", "World", "I'm originaler")),
]
seen = ["_hello", "i'm originaler", "world"]
hashes = [str_hash(h) for h in seen]
h = dedup.FlatHashSet()
h.add(hashes)
# Note: 'world' appears only once and won't be treated as a duplicate.
h.add(hashes[:-1])
h.dump(tmp_path / "hashes.bin")
results = list(
dedup.DuplicatesRemover("text", [tmp_path / "hashes.bin"]).map(documents)
)
expected = [
dict(
text=text("World", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[1, 2],
),
dict(
text=text("Good morning", "World"),
original_nlines=3,
nlines=2,
line_ids=[0, 1],
),
]
assert_documents_equal(expected, results, ignoring=LENGTHS)
def test_dedup_fast(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
parts = [data / "part_0.json", data / "part_1.json"]
res = tmp_path / "res"
res.mkdir()
h = tmp_path / "hashes.bin"
field = "text"
jsonql.run_pipes(dedup.HashesCollector(field, output=h), file=parts)
for part in parts:
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
jsonql.run_pipes(
dedup.DuplicatesRemover(field, [h]), file=part, output=res / part.name
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(
text=text("Good morning", "I'm originaler"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
words = [w for part in [part_0, part_1] for doc in part for w in doc]
expected = {str_hash(s.lower()): s.startswith("_") for s in words}
assert expected == load_hashes(h)
def test_remove_duplicates_sharded(tmp_path: Path):
data = tmp_path / "data"
part_0 = [["Hello", "_World", "I'm so original"]]
write_docs(data / "part_0.json", part_0)
part_1 = [["_Good morning", "_World", "I'm originaler"]]
write_docs(data / "part_1.json", part_1)
h = tmp_path / "hashes"
h.mkdir()
h0 = FlatHashSet()
h0.add([str_hash(s.lower()) for doc in part_0 for s in doc])
h0.add([str_hash("_world")])
h0.dump(h / "part_0.bin")
assert {
str_hash("hello"): False,
str_hash("_world"): True,
str_hash("i'm so original"): False,
} == as_dict(h0)
h1 = FlatHashSet()
h1.add([str_hash(s.lower()) for doc in part_1 for s in doc])
h1.add([str_hash("_good morning")])
h1.dump(h / "part_1.bin")
assert {
str_hash("_good morning"): True,
str_hash("_world"): False,
str_hash("i'm originaler"): False,
} == as_dict(h1)
res = tmp_path / "res"
res.mkdir()
# dedup.DISABLE_MULTI_PROCESSING = True # Simplifies debugging
dedup.remove_duplicates_sharded(
files=[data / "part_0.json", data / "part_1.json"],
outputs=[res / "part_0.json", res / "part_1.json"],
field="text",
hashes_dir=h,
)
results_0 = list(jsonql.read_jsons(res / "part_0.json"))
expected_0 = [
dict(
text=text("Hello", "I'm so original"),
original_nlines=3,
nlines=2,
line_ids=[0, 2],
)
]
assert_documents_equal(expected_0, results_0, ignoring=LENGTHS)
# First pass removes "_world", second "_good morning".
results_1 = list(jsonql.read_jsons(res / "part_1.json"))
expected_1 = [
dict(text=text("I'm originaler"), original_nlines=3, nlines=1, line_ids=[2])
]
assert_documents_equal(expected_1, results_1, ignoring=LENGTHS)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_dedup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import cc_net.text_normalizer as txt
def test_unicode_punct():
weird = ",。、„”“«»1」「《》´∶:?!();–—.~’…━〈〉【】%"
replaced = ',.,""""""""""\'::?!();- - . ~\'...-<>[]%'
assert txt.replace_unicode_punct(weird) == replaced
assert txt.remove_unicode_punct(weird) == ""
def test_numbers():
weird = "023456789 | 0123456789"
normalized = "000000000 | 0000000000"
assert txt.normalize(weird, numbers=True) == normalized
assert txt.normalize(weird, numbers=False) == weird
def test_normalize_for_dedup():
weird = "023´∶:\x10 | ;012 hèllo"
normalized = "000 | ;000 hèllo"
assert normalized == txt.slow_normalize_for_dedup(weird)
assert normalized == txt.normalize_for_dedup(weird)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_normalizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from pathlib import Path
from cc_net import process_wet_file
def test_parsing():
sample = Path(__file__).parent / "data" / "sample.warc.txt"
with open(sample) as f:
documents = list(process_wet_file.parse_warc_file(f))
expected_urls = [
"http://sample_english.com",
"http://sample_chinese.zh",
"http://sample_russian.ru",
]
assert expected_urls == [d["url"] for d in documents]
expected_domains = ["sample_english.com", "sample_chinese.zh", "sample_russian.ru"]
assert expected_domains == [d["source_domain"] for d in documents]
expected_date = [
"2019-03-18T00:00:00Z",
"2019-03-18T00:00:01Z",
"2019-03-18T00:00:02Z",
]
assert expected_date == [d["date_download"] for d in documents]
expected_title = [
"Famous Mark Twain Quotes",
"馬克·吐溫名言",
"Цитаты знаменитого Марка Твена",
]
assert expected_title == [d["title"] for d in documents]
expected_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't.
"""
assert expected_quotes == documents[0]["raw_content"]
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_parse_wet_file.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import pytest
from cc_net.flat_hash_set import HASH_TYPE, FlatHashSet, NaiveHashSet
def as_dict(flat_hash_set) -> dict:
return {k: v for (k, v) in flat_hash_set.items()}
need_getpy = pytest.mark.skipif(
FlatHashSet == NaiveHashSet, reason="getpy isn't installed"
)
def same_behavior(test_case):
def run_case():
        flat = as_dict(test_case(FlatHashSet))
        naive = as_dict(test_case(NaiveHashSet))
        assert naive == flat
return need_getpy(run_case)
@same_behavior
def test_setitem(hash_set_cls):
h = hash_set_cls()
h[np.arange(10, dtype=h.dtype)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=h.dtype)] = np.ones(5, dtype=np.uint8)
return h
@same_behavior
def test_add_dup(hash_set_cls):
h = hash_set_cls()
h.add(np.arange(10, dtype=h.dtype))
h.add(np.arange(5, dtype=h.dtype))
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h), f"add_dup with {hash_set_cls.__name__}"
return h
@need_getpy
def test_gp_dict():
import getpy as gp # type: ignore
h = gp.Dict(HASH_TYPE, np.uint8)
h[np.arange(10, dtype=HASH_TYPE)] = np.zeros(10, dtype=np.uint8)
h[np.arange(5, dtype=HASH_TYPE)] = np.ones(5, dtype=np.uint8)
expected = {i: i < 5 for i in range(10)}
assert expected == as_dict(h)
def check_reload(h, dump, load, tmp_path):
dump_path = tmp_path / dump.__name__
dump(h, dump_path)
h2 = type(h)()
load(h2, dump_path)
assert as_dict(h) == as_dict(h2)
@pytest.mark.parametrize("hash_set_cls", [FlatHashSet, NaiveHashSet])
def test_loading(tmp_path, hash_set_cls):
h = hash_set_cls()
x = np.random.randint(0, 2 ** 32, (100,), dtype=h.dtype)
h.add(x)
check_reload(h, hash_set_cls.dump, hash_set_cls.load, tmp_path)
check_reload(h, hash_set_cls.dump_np, hash_set_cls.load_np, tmp_path)
if hasattr(hash_set_cls, "dump_gp"):
check_reload(h, hash_set_cls.dump_gp, hash_set_cls.load_gp, tmp_path)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_flat_hash_set.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import pytest
def _request_is_disabled(self, *args, **kwargs):
raise Exception(
f"Your code tried to call 'request' with: {args}, {kwargs}. Unit test aren't allowed to reach internet."
)
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
"""Remove requests.sessions.Session.request for all tests."""
monkeypatch.setattr("requests.sessions.Session.request", _request_is_disabled)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/conftest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import time
from cc_net import jsonql, regroup
def check_regroup(tmp_path, regroup_fn, check_blocks_boundaries=False):
n_shards = 4
n_docs = 20
shards = [
[dict(id=i, shard=s, raw_content="hello world") for i in range(n_docs)]
for s in range(n_shards)
]
shards_files = [tmp_path / f"{s:04d}.json.gz" for s in range(n_shards)]
for shard, shard_file in zip(shards, shards_files):
jsonql.run_pipes(inputs=shard, output=shard_file)
regroup_file = tmp_path / "regroup.json.gz"
start = time.time()
regroup_fn(shards_files, regroup_file)
duration = time.time() - start
print(f"{regroup_fn.__module__}.{regroup_fn.__name__} took {duration}s")
regrouped = list(jsonql.read_jsons(regroup_file))
assert [doc for shard in shards for doc in shard] == regrouped
readers = jsonql.get_block_readers(regroup_file, n_shards)
if not check_blocks_boundaries:
assert [doc for shard in shards for doc in shard] == [
doc for reader in readers for doc in jsonql.read_jsons(reader)
]
return
for shard, reader in zip(shards, readers):
block = [doc for doc in jsonql.read_jsons(reader)]
assert shard == block
def test_regroup(tmp_path):
# With regroup boundaries will be every 256Mb.
check_regroup(tmp_path, regroup.reshard, check_blocks_boundaries=False)
def test_fast_regroup(tmp_path):
# With fast regroup boundaries should match the shards.
check_regroup(tmp_path, regroup.fast_reshard, check_blocks_boundaries=True)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_regroup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import io
from pathlib import Path
from typing import Sequence
import numpy as np
import pytest
from cc_net import jsonql
def bar(small_bar: str) -> str:
return small_bar.replace(" ", " " * 10).replace("█", "█" * 10)
def get_output(transformer, data, **kwargs):
with io.StringIO() as output:
# Convert data to a generator so that it's not interpreted as a file list.
jsonql.run_pipe(transformer, kwargs, file=(x for x in data), output=output)
return output.getvalue()
def test_split(tmp_path: Path):
data = [
dict(text="Hello world", lang="en"),
dict(text="Boujour les amis", lang="fr"),
dict(text="Rock your boat", lang="en"),
]
with jsonql.split(tmp_path / "{lang}.json") as split:
list(split.map(data))
summary = split.summary()
assert "Found 2 splits." in summary
en_docs = list(jsonql.read_jsons(tmp_path / "en.json"))
assert [data[0], data[2]] == en_docs
fr_docs = list(jsonql.read_jsons(tmp_path / "fr.json"))
assert [data[1]] == fr_docs
def test_split_bad_pattern(tmp_path: Path):
data = [dict(text="Hello world", lang="en")]
with pytest.raises(KeyError):
with jsonql.split(tmp_path / "{language}.json") as split:
list(split.map(data))
def test_histogram():
data = [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]
hist, bins = jsonql.histogram(data, bins=8, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(1, 10)])
np.testing.assert_almost_equal(hist, [4, 0, 0, 2, 0, 0, 0, 2])
data = [0, 0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.8, 0.8, 1]
hist, bins = jsonql.histogram(data, bins=10, weights=None)
np.testing.assert_almost_equal(bins, [0.1 * x for x in range(11)])
np.testing.assert_almost_equal(hist, [1, 4, 0, 0, 2, 0, 0, 0, 2, 1])
def test_display_stats():
stats = {
jsonql.ALL_DOCUMENTS: 100,
"title": 80,
"title.length": 80 * 50,
"text": 100,
"text.length": 100 * 1000,
"popularity": 8,
"popularity.val": [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9],
}
(title,) = jsonql.display_stats(stats, "title")
assert "title" in title
assert "saw 80 times" in title
assert "average length is" in title
assert "\n" not in title
(text,) = jsonql.display_stats(stats, "text")
assert "text" in text
assert "saw 100 times" in text
assert "average length is" in text
assert "\n" not in text
histogram = jsonql.display_stats(
stats, "popularity", bins=[x / 10 for x in range(1, 10)]
)
assert "popularity" in histogram[0]
assert "saw 8 times" in histogram[0]
assert "histogram is" in histogram[0]
assert "0.100 " + bar("████████") in histogram[1]
assert "0.400 " + bar("████ ") in histogram[2]
assert "0.800 " + bar("████ ") in histogram[3]
cum_histogram = jsonql.display_stats(stats, "popularity", bins=8, cumulative=True)
assert "popularity" in cum_histogram[0]
assert "saw 8 times" in cum_histogram[0]
assert "histogram is" in cum_histogram[0]
assert "0.100 " + bar("████ ") in cum_histogram[1]
assert "0.400 " + bar("██████ ") in cum_histogram[2]
assert "0.800 " + bar("████████") in cum_histogram[3]
def test_describe():
def sample(pop):
return dict(title="Lorem", text="Lorem ipsum dolor sit amet.", popularity=pop)
data = [sample(pop) for pop in [0.1, 0.1, 0.1, 0.1, 0.4, 0.4, 0.9, 0.9]]
desc = get_output(
jsonql.describe, data, columns=None, bins=[x / 10 for x in range(1, 10)]
)
assert "Field title saw 8 times (100.0%), average length is 5" in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity saw 8 times (100.0%), histogram is" in desc
assert "0.100 " + bar("████████") in desc
assert "0.400 " + bar("████ ") in desc
assert "0.800 " + bar("████ ") in desc
desc = get_output(jsonql.describe, data, columns=["text"])
assert "Field title saw 8 times (100.0%), average length is 5" not in desc
assert "Field text saw 8 times (100.0%), average length is 27" in desc
assert "Field popularity, histogram is:" not in desc
def test_custom_pipe():
def transformer(source, sep=" "):
for i, line in enumerate(source):
res = f"{i}{sep}{line}"
yield res
data = ["hello", "world"]
assert get_output(transformer, data) == "0 hello\n1 world\n"
assert get_output(transformer, data, sep="_") == "0_hello\n1_world\n"
def test_open_read_write(tmp_path: Path):
def _lines(filename: Path) -> Sequence[str]:
# jsonql.lines calls open_read
return list(jsonql.lines(filename))
tmp = tmp_path
with jsonql.open_write(tmp / "a.txt") as o:
print("a", file=o)
assert _lines(tmp / "a.txt") == ["a"]
jsonql.write_jsons([{"a": 1}], tmp / "a.txt")
assert _lines(tmp / "a.txt") == ['{"a": 1}']
with jsonql.open_write(tmp / "a.gz") as o:
print("a", file=o)
assert _lines(tmp / "a.gz") == ["a"]
with jsonql.open_write([tmp / "a0.txt", tmp / "a1.txt"]) as o:
print("a", file=o)
assert _lines(tmp / "a0.txt") == ["a"]
assert not (tmp / "a1.txt").is_file()
with jsonql.open_write([tmp / "b0.txt", tmp / "b1.txt"], max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b0.txt") == ["0" * 2000]
assert _lines(tmp / "b1.txt") == ["1" * 2000]
with jsonql.open_write(tmp / "a_????.json") as o:
print("a", file=o)
assert _lines(tmp / "a_0000.json") == ["a"]
assert not (tmp / "a_0001.json").is_file()
assert _lines(tmp / "a_*.json") == ["a"]
with jsonql.open_write(tmp / "b_??.json", max_size="1k") as o:
print("0" * 2000, file=o)
print("1" * 2000, file=o)
assert _lines(tmp / "b_00.json") == ["0" * 2000]
assert _lines(tmp / "b_01.json") == ["1" * 2000]
assert _lines(tmp / "b_*.json") == ["0" * 2000, "1" * 2000]
def test_split_file(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\nWorld\n"
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_line(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello _|_\nWorld\n"
# split is here ^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello _|_\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_split_file_middle_of_char(tmp_path: Path):
file = tmp_path / "test.txt"
content = "Hello\U0001F40D\nWorld\n"
# split is here ^^
with open(file, "w") as o:
o.write(content)
with jsonql.SplitFile(file, chunk=0, n_chunks=2) as f:
assert f.readlines() == ["Hello🐍\n"]
with jsonql.SplitFile(file, chunk=1, n_chunks=2) as f:
assert f.readlines() == ["World\n"]
def test_blocked_gzip(tmp_path: Path):
file = tmp_path / "test.gz"
f = str(file)
# Each object is 10/11 bytes long. We have 2 of them by block.
content = ['{"xx": %d}' % i for i in range(80)]
with jsonql.BlockedGzipWriter(file, "wt", block_size="20B") as o:
for line in content:
print(line, file=o)
jr = jsonql.JsonReader(strict=True)
expected = list(jr.map(content))
# read as one file
assert expected == list(jsonql.read_jsons(file))
# read first block
assert expected[:2] == list(jsonql.read_jsons(f + "[0/40]"))
# read last block
assert expected[-2:] == list(jsonql.read_jsons(f + "[39/40]"))
readers = jsonql.get_block_readers(file, 9)
read_as_several_files = [list(jsonql.read_jsons(r)) for r in readers]
# 40 splits of 2 docs, 9 readers -> 5 splits, 10 docs per reader
assert list(jsonql.grouper(expected, 10)) == read_as_several_files
def test_enter_exit(capsys):
class MyTransformer(jsonql.Transformer):
def __enter__(self):
print("trans: started")
self.ready = True
return self
def __exit__(self, *args):
print("trans: done")
def do(self, x):
return (x, x)
def acc(values):
print("acc: started")
res = 0
for (x, _) in values:
res += int(x)
print("acc: done")
yield f"acc: result={res}"
t = MyTransformer()
data = (str(x) for x in range(10))
print("pipeline: started")
# Print to stdout.
jsonql.run_pipes(t, acc, file=data)
print("pipeline: done")
out = capsys.readouterr().out
assert (
"\n".join(
[
"pipeline: started",
"trans: started",
"acc: started",
"acc: done",
f"acc: result=45",
# Transformers are closed at the very end.
"trans: done",
"pipeline: done\n",
]
)
== out
)
def test_write_to_stdout(capsys):
lines = [str(x) for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "\n".join(lines) + "\n"
def test_write_to_stdout_handle_newlines(capsys):
lines = [str(x) + "\n" for x in range(10)]
jsonql.run_pipes(file=iter(lines))
out = capsys.readouterr().out
assert out == "".join(lines)
def test_multiprocess(capsys):
mult = jsonql.Mapper(lambda x: f"2x = {2 * int(x)}")
jsonql.run_pipes(mult, processes=2, file=(str(x) for x in range(10)))
out = set(capsys.readouterr().out.strip("\n").split("\n"))
assert set(f"2x = {2 * x}" for x in range(10)) == out
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_jsonql.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
sentences = ["Hello world !", "Is everyone happy in here ?"]
hashes = get_hashes(sentences)
assert all([len(h) == HASH_SIZE for h in hashes])
hashes_int = [minify._b2i(h) for h in hashes]
encoded = encode_hashes(hashes)
decoded = decode_hashes(encoded)
assert all([len(d) == HASH_SIZE for d in decoded])
decoded_int = [minify._b2i(d) for d in decoded]
assert hashes_int == decoded_int
assert hashes == decoded
def test_minify():
doc = {
"raw_content": "Hello world !\nIs everyone happy in here ?",
"language": "en",
"perplexity": 120.0,
"line_ids": [0, 4],
}
expected = {"line_ids": "AAAEAA==", "language": "en", "perplexity": 120.0}
minifier = minify.Minifier()
assert expected == minifier(doc)
@pytest.fixture
def http_from_disk(monkeypatch):
def read_sample_file(url: str, n_retry: int = 3) -> bytes:
expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
assert expected_url == url
file = Path(__file__).parent / "data" / "sample.warc.txt"
return file.read_bytes()
monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
# We don't need no education.
chosen_quotes = "\n".join(
l for l in full_quotes.splitlines() if "Education" not in l
)
cc_doc = {
"url": "http://sample_english.com",
"date_download": "2019-03-18T00:00:00Z",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"source_domain": "sample_english.com",
"title": "Famous Mark Twain Quotes",
"raw_content": full_quotes,
"cc_segment": "crawl-data/sample.warc.wet",
"nlines": 4,
"length": 353,
}
ccnet_metadata = {
"language": "en",
"language_score": 0.99,
"perplexity": 151.5,
"bucket": "head",
"raw_content": chosen_quotes,
"nlines": 3,
"length": len(chosen_quotes),
"original_nlines": 4,
"original_length": 353,
"line_ids": [0, 2, 3],
}
ccnet_doc = dict(cc_doc, **ccnet_metadata)
mini = minify.Minifier()(ccnet_doc.copy())
assert mini is not ccnet_doc
important_fields = [
"url",
"digest",
"cc_segment",
"language",
"language_score",
"perplexity",
"bucket",
"line_ids",
]
expected = {k: ccnet_doc[k] for k in important_fields}
expected["line_ids"] = encode_line_ids(expected["line_ids"]) # type: ignore
assert expected == mini
with jsonql.open_write(tmp_path / "sample.json") as o:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
# line_ids is removed when unminifying
ccnet_doc.pop("line_ids")
assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
mini_docs = [
{
"url": "http://sample_chinese.com",
"digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([2]),
"bucket": "not_that_great",
},
{
"url": "http://sample_english.com",
"digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
"cc_segment": "crawl-data/sample.warc.wet",
"line_ids": encode_line_ids([3]),
"bucket": "top_notch",
},
]
with jsonql.open_write(tmp_path / "sample.json") as o:
for mini in mini_docs:
print(json.dumps(mini), file=o)
fetcher = minify.MetadataFetcher(tmp_path)
cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
docs = [d for d in fetcher.map(cc) if d is not None]
assert cc.retrieved_segments == 1
# Note: documents are retrieved as they are ordered in the .warc.wet file
assert [
"Facts are stubborn things, but statistics are more pliable.",
"事實是固執的東西,但統計數字卻比較柔和。",
] == [d["raw_content"] for d in docs]
assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_minify.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import inspect
import pickle
from pathlib import Path
import pytest
from cc_net import dedup, jsonql, perplexity, split_by_lang, tokenizer
def get_transformers(module):
return [
v
for v in vars(module).values()
if type(v) is type
and issubclass(v, jsonql.Transformer)
and v != jsonql.Transformer
]
ALL_TRANSFORMERS = (
get_transformers(jsonql)
+ get_transformers(dedup)
+ get_transformers(perplexity)
+ get_transformers(tokenizer)
+ get_transformers(split_by_lang)
)
def check_transformer_is_calling_super_init(cls: type):
assert issubclass(cls, jsonql.Transformer)
    # Accessing __init__ is generally an error, but here we do want to inspect
    # the __init__ method.
code = inspect.getsource(cls.__init__) # type: ignore
code = code.replace(" ", "")
# Check that super().__init__ is called.
assert "super().__init__()" in code
def test_bad_transformers_are_caught():
class BadTransformer(jsonql.Transformer):
def __init__(self, arg):
# We aren't calling super /!\
self.arg = arg
with pytest.raises(AssertionError):
check_transformer_is_calling_super_init(BadTransformer)
@pytest.mark.parametrize("transformer", ALL_TRANSFORMERS)
def test_transformer_is_correctly_implemented(transformer):
check_transformer_is_calling_super_init(transformer)
@pytest.mark.skipif(
not Path("bin/lid.bin").exists(), reason="bin/lid.bin not found, run `make install`"
)
def test_can_pickle_transformer(tmp_path):
model = Path("bin/lid.bin")
if not model.exists():
return
classifier = split_by_lang.Classifier(model, "text", "lang")
classifier.__enter__()
doc = dict(text="Hello world ! This is English btw.")
original_results = classifier(doc)
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
# Do it again with the unpickled object.
with open(tmp_path / "transformer.pkl", "wb") as o:
pickle.dump(classifier, o)
with open(tmp_path / "transformer.pkl", "rb") as f:
classifier = pickle.load(f)
assert original_results == classifier(doc)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/cc_net/tests/test_transformer.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
## Load data from the Wikipedia corpus
## And output them as label "__label__wiki"
#
files = ["cc_net/data/mined/wikipedia/en_head_0000.json.gz", "cc_net/data/mined/wikipedia/en_middle_0000.json.gz"]
unique = {}
i = 0
for f in files:
for jstr in gzip.open(f, "rt"):
i = i + 1
result = json.loads(jstr)
result["class"] = "wiki"
if result["digest"] in unique:
continue
unique["digest"] = 1
if(len(result["raw_content"]) < 1000):
continue
print("__label__wiki " + " ".join(result["raw_content"].splitlines()))
jobs = []
for file in glob.glob("common_crawl/*/*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
## Fetch `perfile` number of webpages for each CommonCrawl partition
#
perfile = i / len(jobs)
## Output Commoncrawl data as label "__label__wiki"
#
n = 0
for job in jobs:
j = 0
for jstr in gzip.open(job, "rt"):
j = j + 1
if j > perfile:
break
result = json.loads(jstr)
result["class"] = "cc"
print("__label__cc " + " ".join(result["raw_content"].splitlines()))
n = n + 1
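# Hedged follow-up sketch (assumption, not part of the original script): the
# "__label__wiki" / "__label__cc" lines printed above are in fastText's
# supervised-training format, so the model.bin consumed by classify.py could
# plausibly be produced along these lines. File names below are placeholders.
def _example_train_classifier(train_file="wiki_vs_cc.txt", model_out="model.bin"):
    import fasttext  # assumes the `fasttext` package is installed
    model = fasttext.train_supervised(input=train_file)
    model.save_model(model_out)
    return model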
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/classifier/create_corpus.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool
# Get all jobs.
# Each job corresponds to a file ending with .gz, with "middle" or "head" in its name
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# For each row, run classifier and output
# (text: [...], source, pred_label, pred_label_prob, wiki_prob)
#
def run(job):
import fasttext
model = fasttext.load_model("../fastText/model.bin")
print(job)
ofile = gzip.open(job + ".dedup.classifier.gz", "wt")
ostat = open(job + ".dedup.classifier.gz.stat", "wt")
line = 0
for jstr in gzip.open(job + ".result", "rt"):
result = json.loads(jstr)
content = result["raw_content"]
output = {}
# run classifier
text = " ".join(content.strip().splitlines())
pred = model.predict(text)
(pred_label, pred_prob) = pred
pred_label = pred_label[0]
wiki_prob = pred_prob[0]
if pred_label == "__label__cc":
wiki_prob = 1 - wiki_prob
output["pred_label"] = pred_label
output["pred_label_prob"] = pred_prob[0]
output["wiki_prob"] = wiki_prob
output["text"] = content
output["source"] = "cc/" + job + f"/line{line}"
line = line + 1
nchars = len(content)
ostat.write(f"{nchars}\t{wiki_prob}\n")
ofile.write(json.dumps(output) + "\n")
ofile.close()
ostat.close()
with Pool(224) as p:
p.map(run, jobs)
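# Hedged follow-up sketch (assumption): each ".stat" file written above holds
# one "<nchars>\t<wiki_prob>" line per document, so a candidate filtering
# threshold can be explored by aggregating those files. The 0.25 value is an
# illustrative placeholder, not a threshold taken from this repository.
def _example_fraction_kept(stat_files, threshold=0.25):
    kept = total = 0
    for path in stat_files:
        with open(path) as f:
            for stat_line in f:
                _, wiki_prob = stat_line.rstrip("\n").split("\t")
                total += 1
                if float(wiki_prob) > threshold:
                    kept += 1
    return kept / max(total, 1)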
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/classifier/classify.py |
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--data",
"-d",
help="path to articles xml",
default="enwiki-20230401-pages-articles-multistream.xml",
)
parser.add_argument(
"--output",
"-o",
help="path to extracted urls file",
default="./extracted_urls.txt",
)
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="show progress",
)
args = parser.parse_args()
def get_urls():
with open(args.data, "r", errors="ignore") as f, open(args.output, "w") as out:
for i, line in enumerate(f, start=1):
refs = re.search("<ref>(.*)</ref>", line)
if refs is not None:
results = re.findall(
r"\b(?:https?|telnet|gopher|file|wais|ftp):[\w/#~:.?+=&%@!\-.:?\\-]+?(?=[.:?\-]*(?:[^\w/#~:.?+=&%@!\-.:?\-]|$))",
refs.group(0),
)
if len(results) > 0:
for result in results:
out.write(result + "\n")
if args.verbose and i % 1000000 == 0:
print("Lines searched: {}".format(i), end="\r")
def main():
get_urls()
if __name__ == "__main__":
main()
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/classifier/extract_urls.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool
# Get all jobs.
# Each job corresponds to a file ending with .gz, with "middle" or "head" in its name
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# Output (URL, digest) pairs for each job
#
def run(job):
print(job)
ofile = gzip.open( job + ".dedup", "wt")
for jstr in gzip.open(job, "rt"):
result = json.loads(jstr)
ofile.write(result['url'] + " " + result['digest'] + "\n")
ofile.close()
with Pool(64) as p:
p.map(run, jobs)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/dedup/dedup_phase1.py |
import glob, os
import json
import sys
import re
import hashlib
import gzip
import os
from multiprocessing import Pool, Value
import multiprocessing
import gc
# Get all jobs
#
jobs = []
os.chdir(sys.argv[1])
for file in glob.glob("*/*.gz"):
if ("middle" in file or "head" in file) and "dedup" not in file:
jobs.append(file)
print("TOTAL # JOBS:", len(jobs))
# Load all pairs of (fileid, digest)
#
counter = Value('i', 0)
lock = multiprocessing.Lock()
def load(job):
load_job = {}
global counter
with counter.get_lock():
counter.value += 1
print(counter.value, job)
# test: early stop
#if counter.value > 10:
# return {}
for line in gzip.open(job + ".dedup", mode='rt'):
(fileid, digest) = line.split(" ")
load_job[fileid] = digest
return load_job
with Pool(64) as p:
loaded_ = p.map(load, jobs)
loaded = {}
for j in range(0, len(jobs)):
loaded[jobs[j]] = loaded_[j]
# Dedup
# unique fileIDs are in unique_fileid
# also write unique fileID for each job in its own file
#
table = {}
unique_fileid = {}
#ufile = gzip.open("unique_fileids", "wt")
for job in loaded:
print("loaded", job, len(loaded[job]))
    ufile = gzip.open(job + ".unique_fileids", "wt")
for fileid in loaded[job]:
digest = loaded[job][fileid]
if digest not in table:
table[digest] = 1
unique_fileid[fileid] = 1
ufile.write(fileid + "\n")
ufile.close()
print("total unique", len(unique_fileid))
# GC
#
del loaded_
del loaded
gc.collect()
# Write out the result
#
def write(job):
global counter
with counter.get_lock():
counter.value += 1
print("write", counter.value, job)
ofile = gzip.open( job + ".result", "wt")
wrote = 0
total = 0
for jstr in gzip.open(job, "rt"):
result = json.loads(jstr)
if result['url'] in unique_fileid:
wrote = wrote + 1
ofile.write(jstr)
total = total + 1
print(" wrote", wrote, "/", total)
ofile.close()
with Pool(64) as p:
p.map(write, jobs)
| EXA-1-master | exa/datasets/RedPajama-Data-main/data_prep/cc/dedup/dedup_phase2.py |
from megatron.data.indexed_dataset import MMapIndexedDataset
from transformers import AutoTokenizer
import argparse
# get the first argument as a file name, and an output file
parser = argparse.ArgumentParser()
parser.add_argument("file_name", help="the file name to read")
parser.add_argument("output_file", help="the file name to write")
args = parser.parse_args()
ds = MMapIndexedDataset(args.file_name)
tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
num_tokens = [
len(ds[i]) for i in range(len(ds))
]
# write it out to an output_file
with open(args.output_file, "w") as f:
for i in num_tokens:
f.write(f"{i}\n")
print(f'Total tokens: {sum(num_tokens)}')
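# Hedged follow-up sketch: the output file written above has one token count
# per line, so the total can be recomputed later without reloading the
# memory-mapped dataset.
def _example_total_tokens(counts_file: str) -> int:
    with open(counts_file) as f:
        return sum(int(count) for count in f if count.strip())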
| EXA-1-master | exa/datasets/RedPajama-Data-main/tokenization/count_tokens.py |
"""
Embed each row of a `.jsonl` file using a HuggingFace model and save the embeddings.
Authors: The Meerkat Team (Karan Goel, Sabri Eyuboglu, Arjun Desai)
License: Apache License 2.0
"""
import os
from argparse import ArgumentParser
import numpy as np
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.json
import torch
import torch.nn.functional as F
from rich import print
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
import meerkat as mk
class TruncatedDataset:
def __init__(
self,
df: mk.DataFrame,
tokenizer: AutoTokenizer,
chunk_size: int,
):
self.df = df
self.tokenizer = tokenizer
self.chunk_size = chunk_size
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
data = self.df[idx]
tokenized = self.tokenizer(
data["text"],
pad_to_multiple_of=self.chunk_size,
padding=True,
)
return {
"input_ids": torch.tensor(tokenized["input_ids"][: self.chunk_size]),
"attention_mask": torch.tensor(
tokenized["attention_mask"][: self.chunk_size]
),
"doc_id": data["id"],
"chunk_id": 0,
}
def create_model_and_tokenizer(
model_name: str,
cache_dir: str,
):
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print("Loading model...")
model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir).cuda()
return model, tokenizer
def prepare(feature_dir: str, savepath: str):
if not os.path.exists(feature_dir):
os.makedirs(feature_dir)
if os.path.exists(savepath):
exit()
def load_dataframe(path):
print("Loading dataframe...")
# Load in the JSON.
df = mk.from_json(
path,
lines=True,
backend="arrow",
read_options=pa.json.ReadOptions(**{"block_size": 10 << 20}),
)
if "meta" in df.columns:
struct_array = df["meta"].data
result = {}
for field_index in range(struct_array.type.num_fields):
field = struct_array.type.field(field_index)
result[field.name] = mk.ArrowScalarColumn(
pc.struct_field(struct_array, field.name)
)
meta_df = mk.DataFrame(result)
else:
meta_df = mk.DataFrame()
if "id" in meta_df.columns:
df["id"] = meta_df["id"]
elif "arxiv_id" in meta_df.columns:
df["id"] = meta_df["arxiv_id"]
else:
try:
df["id"] = meta_df["pkey"]
        except Exception:
df.create_primary_key("id")
df = df.set_primary_key("id")
try:
df = df.drop("pkey")
except ValueError:
pass
assert set(df.columns) >= set(
["id", "text"]
), f"Unexpected columns: {set(df.columns)}"
return df
def create_dataloader(
filepath: str,
tokenizer: AutoTokenizer,
chunk_size: int,
batch_size: int,
num_workers: int,
):
dataset = TruncatedDataset(
load_dataframe(filepath),
tokenizer,
chunk_size=chunk_size,
)
return torch.utils.data.DataLoader(
dataset,
shuffle=False,
batch_size=batch_size,
num_workers=num_workers,
)
@torch.no_grad()
def extract_features(
model: torch.nn.Module,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
):
"""Extract features from the model."""
# Extract features from the model
attention_mask = attention_mask.cuda()
outputs = model.forward(input_ids.cuda(), attention_mask=attention_mask)[0]
# Use the attention mask to average the output vectors.
outputs = outputs.cpu()
attention_mask = attention_mask.cpu()
features = (outputs * attention_mask.unsqueeze(2)).sum(1) / attention_mask.sum(
1
).unsqueeze(1).cpu()
# Normalize embeddings
features = F.normalize(features, p=2, dim=1).numpy()
return features
def run_feature_extraction(
model: torch.nn.Module,
dataloader: torch.utils.data.DataLoader,
):
print("Feature extraction...")
storage = []
for batch in tqdm(dataloader):
features = extract_features(model, batch["input_ids"], batch["attention_mask"])
storage.append(features)
# Save the features to disk.
return np.concatenate(storage, axis=0).reshape(-1, 384)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--filepath", type=str)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--chunk_size", type=int, default=256)
parser.add_argument(
"--model_name",
type=str,
default="sentence-transformers/all-MiniLM-L6-v2",
)
parser.add_argument("--cache_dir", type=str, default="/home/karan/models/")
parser.add_argument(
"--feature_dir",
type=str,
default=f"/home/karan/data/pyjama/features/",
)
args = parser.parse_args()
feature_dir = os.path.join(args.feature_dir, args.model_name)
CUDA_VISIBLE_DEVICES = args.gpu
os.environ["CUDA_VISIBLE_DEVICES"] = str(CUDA_VISIBLE_DEVICES)
# Get num_gpus on this machine.
num_gpus = torch.cuda.device_count()
filepath = args.filepath
filename = os.path.basename(filepath)
savepath = os.path.join(feature_dir, filename.replace(".jsonl", ".npy"))
prepare(feature_dir, savepath)
model, tokenizer = create_model_and_tokenizer(args.model_name, args.cache_dir)
dataloader = create_dataloader(
filepath,
tokenizer,
chunk_size=args.chunk_size,
batch_size=args.batch_size,
num_workers=args.num_workers,
)
features = run_feature_extraction(model, dataloader)
np.save(savepath, features)
print("Done.")
| EXA-1-master | exa/datasets/RedPajama-Data-main/viz/embed_jsonl.py |
import os
from argparse import ArgumentParser
from glob import glob
import faiss
import numpy as np
from tqdm.auto import tqdm
def build_pca(
xb: np.ndarray,
d_in: int = 384,
d_out: int = 32,
):
pca = faiss.PCAMatrix(d_in, d_out)
pca.train(xb)
return pca
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--feature_dir",
type=str,
default="~/data/pyjama/features/sentence-transformers/all-MiniLM-L6-v2/",
)
args = parser.parse_args()
dir = os.path.expanduser(args.feature_dir)
# Load in all the files.
files = sorted(list(glob(f"{dir}/*.sampled.npy")))
print(f"Loading {len(files)} files into memory...")
arrs = [np.load(file) for file in tqdm(files)]
# Concatenate all the arrays
arr = np.concatenate(arrs, axis=0)
print("Combined arr:", arr.shape)
# Create the PCA
pca = build_pca(arr)
faiss.write_VectorTransform(pca, f"{dir}/pca32.faiss")
# Apply to all vectors.
arr_reduced = pca.apply(arr)
# Save the reduced array.
np.save(f"{dir}/pca32.npy", arr_reduced)
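# Hedged follow-up sketch: the transform saved above can be re-applied to new
# embedding files with the same faiss calls already used in this script. The
# file paths below are placeholders.
def _example_apply_saved_pca(pca_path: str, features_path: str, output_path: str):
    pca = faiss.read_VectorTransform(pca_path)   # e.g. ".../pca32.faiss"
    features = np.load(features_path)            # expected shape: (n, 384)
    np.save(output_path, pca.apply(features))    # reduced to (n, 32)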
| EXA-1-master | exa/datasets/RedPajama-Data-main/viz/reduce_pca32.py |
import faiss
import numpy as np
import torch
import torch.nn.functional as F
from rich import print
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
def build_flat_index(
xb: np.ndarray,
d: int = 32,
):
index = faiss.IndexFlatL2(d)
index.add(xb)
return index
def load_index(
path: str,
):
"""Load the index from a path."""
index = faiss.read_index(path)
return index
def load_pca(path: str):
"""Load the PCA from a path."""
pca = faiss.read_VectorTransform(path)
return pca
def create_model_and_tokenizer(
model_name: str,
cache_dir: str = None,
):
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
print("Loading model...")
model = AutoModel.from_pretrained(model_name, cache_dir=cache_dir)
return model, tokenizer
@torch.no_grad()
def extract_features(
model: torch.nn.Module,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
):
"""Extract features from the model."""
# Extract features from the model
attention_mask = attention_mask
outputs = model.forward(input_ids, attention_mask=attention_mask)[0]
# Use the attention mask to average the output vectors.
outputs = outputs.cpu()
attention_mask = attention_mask.cpu()
features = (outputs * attention_mask.unsqueeze(2)).sum(1) / attention_mask.sum(
1
).unsqueeze(1).cpu()
# Normalize embeddings
features = F.normalize(features, p=2, dim=1).numpy()
return features
def extract_features_single(
text: str,
model: torch.nn.Module,
tokenizer: AutoTokenizer,
chunk_size: int = 512,
):
"""Extract features from the model."""
tokenized = tokenizer(
[text],
pad_to_multiple_of=chunk_size,
padding=True,
)
return extract_features(
model,
torch.tensor(tokenized["input_ids"][:chunk_size]),
torch.tensor(tokenized["attention_mask"][:chunk_size]),
)
def run_feature_extraction(
model: torch.nn.Module,
dataloader: torch.utils.data.DataLoader,
):
print("Feature extraction...")
storage = []
carry = (None, None)
for batch in tqdm(dataloader):
features = extract_features(model, batch["input_ids"], batch["attention_mask"])
chunk_id = np.array(batch["chunk_id"])
doc_id = np.array(batch["doc_id"])
if (chunk_id == 0).all():
storage.append(features)
elif (chunk_id == 0).any():
# Close out the previous document.
# Aggregate based on the document ID.
agg = np.array(
[features[doc_id == i].mean(axis=0) for i in np.unique(doc_id)]
)
# Number of chunks in the first document.
num_chunks_first = (doc_id == doc_id[0]).sum()
# Number of chunks in the last document.
num_chunks_last = (doc_id == doc_id[-1]).sum()
# Batch falls on a document boundary.
if chunk_id[0] == 0:
# Close out the previous document and update the carry.
storage.append(carry[0])
carry = (None, None)
# Batch does not fall on a document boundary.
if carry[0] is not None:
# Reweight the first chunk.
agg[0] = (agg[0] * num_chunks_first + carry[0] * carry[1]) / (
num_chunks_first + carry[1]
)
# Update the carry.
carry = (agg[-1], num_chunks_last)
# Put the features in storage.
storage.append(agg[:-1])
else:
# All chunks should have the same document ID.
assert (doc_id == doc_id[0]).all()
# Aggregate.
agg = np.mean(features, axis=0)
# Reweight.
agg = (agg * len(features) + carry[0] * carry[1]) / (
len(features) + carry[1]
)
# Update the carry: make sure to keep track of the number of chunks.
carry = (agg, len(features) + carry[1])
# Save the features to disk.
return np.concatenate(storage, axis=0).reshape(-1, 384)
| EXA-1-master | exa/datasets/RedPajama-Data-main/viz/utils.py |
import os
from argparse import ArgumentParser
import faiss
import numpy as np
def build_index(
xb: np.ndarray,
d: int = 32,
):
index = faiss.index_factory(d, "IVF100,PQ8")
# Sample 1_000_000 vectors to train the index.
xt = xb[np.random.choice(xb.shape[0], 1_000_000, replace=False)]
index.train(xt)
index.add(xb)
return index
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--dir",
type=str,
default="~/data/pyjama/features/sentence-transformers/all-MiniLM-L6-v2",
)
args = parser.parse_args()
dir = os.path.expanduser(args.dir)
# Load in the embeddings.
arr = np.load(f"{dir}/pca32.npy")
print(arr.shape)
# Create the index.
index = build_index(arr)
faiss.write_index(index, f"{dir}/index_ivf100_pq8.faiss")
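# Hedged follow-up sketch: querying the IVF-PQ index written above. The values
# of k and nprobe are illustrative; queries must be PCA-reduced float32
# vectors of dimension 32, matching the vectors the index was built on.
def _example_search(index_path: str, queries: np.ndarray, k: int = 10, nprobe: int = 10):
    index = faiss.read_index(index_path)
    index.nprobe = nprobe                        # inverted lists visited per query
    distances, ids = index.search(queries.astype(np.float32), k)
    return distances, ids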
| EXA-1-master | exa/datasets/RedPajama-Data-main/viz/index_faiss.py |
"""
A Meerkat app for visualizing the Github subset of the RedPajama dataset.
Authors: The Meerkat Team (Karan Goel, Sabri Eyuboglu, Arjun Desai)
License: Apache License 2.0
"""
import numpy as np
import tempfile
from utils import extract_features_single, load_pca, create_model_and_tokenizer
import meerkat as mk
from meerkat.datasets.utils import download_url
with tempfile.TemporaryDirectory() as temp_dir:
path = download_url(
"https://huggingface.co/datasets/meerkat-ml/lemma/resolve/main/pca.faiss",
temp_dir,
)
pca = load_pca(path)
model_name = "sentence-transformers/all-MiniLM-L6-v2"
model, tokenizer = create_model_and_tokenizer(model_name)
df = mk.read(
"https://huggingface.co/datasets/meerkat-ml/lemma/resolve/main/filtered_08cdfa755e6d4d89b673d5bd1acee5f6.mk.tar.gz"
)
def get_full_text(text_sample: str, repo_name: str, ref: str, path: str):
"""
Get the full text of a code sample from Github.
"""
ref = ref.split("/")[-1]
import requests
return requests.get(
f"https://raw.githubusercontent.com/{repo_name}/{ref}/{path}"
).text
df["text_sample"] = df["text_sample"].format(mk.format.CodeFormatterGroup())
df["full_text"] = df.defer(get_full_text).format(mk.format.CodeFormatterGroup().defer())
df["search"] = mk.ArrowScalarColumn(np.zeros(len(df)))
df["embeddings"] = df["embeddings"].format(mk.format.TextFormatterGroup())
@mk.endpoint
def search(df: mk.DataFrame, new_code: str = ""):
"""The endpoint for executing a search query."""
if new_code != "":
features = extract_features_single(new_code, model, tokenizer)
pca_features = pca.apply(features)
df["search"] = np.matmul(df["embeddings"].data, pca_features.T).squeeze()
df.set(df)
editor = mk.gui.Editor(on_run=search.partial(df), title="Search")
# build controls for the scatter plot
NUM_PCA_COMPONENTS = 5
for i in range(NUM_PCA_COMPONENTS):
df[f"pca_{i+1}"] = df["embeddings"][:, i]
options = [f"pca_{i+1}" for i in range(NUM_PCA_COMPONENTS)] + ["search"]
x_select = mk.gui.Select(
options,
value="pca_1",
)
x_control = mk.gui.html.div(
[mk.gui.Text("X Axis"), x_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
y_select = mk.gui.Select(
options,
value="pca_2",
)
y_control = mk.gui.html.div(
[mk.gui.Text("Y Axis"), y_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
color_select = mk.gui.Select(
options,
value="search",
)
color_control = mk.gui.html.div(
[mk.gui.Text("Color"), color_select], classes="grid grid-cols-[auto_1fr] gap-2"
)
select = mk.gui.html.div(
[x_control, y_control, color_control], classes="grid grid-cols-3 gap-8 px-10"
)
scatter = mk.gui.plotly.DynamicScatter(
df=df,
x=x_select.value,
y=y_select.value,
color=color_select.value,
max_points=10_000,
)
gallery = mk.gui.Gallery(
scatter.filtered_df, main_column="text_sample", tag_columns=["language"]
)
page = mk.gui.Page(
component=mk.gui.html.div(
[
mk.gui.html.div(
[editor, select, scatter],
classes="h-screen grid grid-rows-[1fr_auto_3fr] gap-4",
),
gallery,
],
classes="grid grid-cols-2 gap-4 h-screen py-6",
),
id="lemma",
)
page.launch()
|
EXA-1-master
|
exa/datasets/RedPajama-Data-main/viz/main.py
|
import argparse
import os
import re
import shutil
from pathlib import Path
from cloudpathlib import CloudPath
import img2dataset
from huggingface_hub import snapshot_download
from scale_configs import available_scales
def path_or_cloudpath(s):
if re.match(r"^\w+://", s):
return CloudPath(s)
return Path(s)
HF_REPO = 'mlfoundations/datacomp_pools'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--scale', type=str, required=False, choices=available_scales(simple_names=True)[1:], default='small', help='Competition scale.')
parser.add_argument('--data_dir', type=path_or_cloudpath, required=True, help='Path to directory where the data (webdataset shards) will be stored.')
parser.add_argument('--metadata_dir', type=path_or_cloudpath, default=None, help='Path to directory where the metadata will be stored. If not set, infer from data_dir.')
parser.add_argument('--download_npz', help='If true, also download npz files.', action='store_true', default=False)
parser.add_argument('--overwrite_metadata', help='If true, force re-download of the metadata files.', action='store_true', default=False)
parser.add_argument('--skip_bbox_blurring', help='If true, skip bounding box blurring on images while downloading.', action='store_true', default=False)
parser.add_argument('--processes_count', type=int, required=False, default=16, help='Number of processes for download.')
parser.add_argument('--thread_count', type=int, required=False, default=128, help='Number of threads for download.')
args = parser.parse_args()
metadata_dir = args.metadata_dir
if metadata_dir is None:
metadata_dir = args.data_dir / 'metadata'
# Download the metadata files if needed.
if args.overwrite_metadata or not metadata_dir.exists():
if metadata_dir.exists():
print(f'Cleaning up {metadata_dir}')
shutil.rmtree(metadata_dir)
metadata_dir.mkdir(parents=True)
print(f'Downloading metadata to {metadata_dir}...')
if args.scale != 'xlarge':
hf_metadata_dir = snapshot_download(
repo_id=HF_REPO,
allow_patterns=f'{args.scale}/*.parquet',
cache_dir=metadata_dir / 'hf',
repo_type='dataset'
)
if args.download_npz:
print('Downloading npz files')
snapshot_download(
repo_id=HF_REPO,
allow_patterns=f'{args.scale}/*.npz',
cache_dir=metadata_dir / 'hf',
repo_type='dataset'
)
else:
# Slightly different handling for xlarge scale
hf_metadata_dir = snapshot_download(
repo_id=HF_REPO,
allow_patterns=f'{args.scale}/*/*.parquet',
cache_dir=metadata_dir / 'hf',
repo_type='dataset'
)
if args.download_npz:
print('Downloading npz files')
npz_hf_metadata_dir = snapshot_download(
repo_id=HF_REPO,
allow_patterns=f'{args.scale}_npzs/*/*.npz',
cache_dir=metadata_dir / 'hf',
repo_type='dataset'
)
# Create symlinks
hf_metadata_dir = Path(hf_metadata_dir) / f'{args.scale}'
for filename in hf_metadata_dir.rglob('*.parquet'):
link_filename = metadata_dir / filename.name
true_filename = filename.resolve()
link_filename.symlink_to(true_filename)
if args.download_npz:
if args.scale != 'xlarge':
for filename in hf_metadata_dir.rglob('*.npz'):
link_filename = metadata_dir / filename.name
true_filename = filename.resolve()
link_filename.symlink_to(true_filename)
else:
npz_hf_metadata_dir = Path(npz_hf_metadata_dir) / f'{args.scale}_npzs'
for filename in npz_hf_metadata_dir.rglob('*.npz'):
link_filename = metadata_dir / filename.name
true_filename = filename.resolve()
link_filename.symlink_to(true_filename)
print('Done downloading metadata.')
else:
print(f'Skipping download of metadata because {metadata_dir} exists. Use --overwrite_metadata to force re-downloading.')
# Download images.
shard_dir = args.data_dir / 'shards'
shard_dir.mkdir(parents=True, exist_ok=True)
print(f'Downloading images to {shard_dir}')
bbox_col = None if args.skip_bbox_blurring else 'face_bboxes'
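    # Passing a bbox column makes img2dataset blur those regions (here, faces) while downloading and resizing.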
img2dataset.download(
url_list=str(metadata_dir),
image_size=512,
output_folder=str(shard_dir),
processes_count=args.processes_count,
thread_count=args.thread_count,
resize_mode='keep_ratio_largest',
resize_only_if_bigger=True,
output_format='webdataset',
input_format='parquet',
url_col='url',
caption_col='text',
bbox_col=bbox_col,
save_additional_columns=['uid'],
number_sample_per_shard=10000,
oom_shard_count=8
)
print('Done!')
|
EXA-1-master
|
exa/datasets/datacomp/download_upstream.py
|
#!/usr/bin/env python3
import argparse
import bisect
import copy
import logging
import multiprocessing as mp
import os
import queue
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
from cloudpathlib.enums import FileCacheMode
from dataclasses import dataclass
from functools import lru_cache
from multiprocessing.managers import NamespaceProxy, AcquirerProxy
from pathlib import Path
from typing import List, Optional, Callable, Union, Dict
import cv2
import numpy as np
import simdjson
import tqdm
import webdataset as wds
import pyarrow as pa
import pyarrow.compute as pc
import pyarrow.parquet as pq
from cloudpathlib import CloudPath
from img2dataset.blurrer import BoundingBoxBlurrer
from webdataset.tariterators import (
base_plus_ext,
url_opener,
tar_file_expander,
valid_sample,
)
# we always read and write files exactly once so we can use the strictest caching policy
os.environ["CLOUPATHLIB_FILE_CACHE_MODE"] = FileCacheMode.close_file.name
Pipe = wds.writer.gopen.Pipe
Pathy = Union[Path, CloudPath]
class ColoredConsoleHandler(logging.Handler):
# TODO: Abstract ANSI color escapes
def __init__(self, sub_handler=None):
super().__init__()
self.sub_handler = (
logging.StreamHandler() if sub_handler is None else sub_handler
)
def emit(self, record):
        # Need to make an actual copy of the record
# to prevent altering the message for other loggers
myrecord = copy.copy(record)
levelno = myrecord.levelno
# NOTSET and anything else
color = "\x1b[0m" # normal
tag = "NOTSET"
if levelno >= logging.FATAL:
color = "\x1b[31m" # red
tag = "FATAL"
elif levelno >= logging.ERROR:
color = "\x1b[31m" # red
tag = "ERROR"
elif levelno >= logging.WARNING:
color = "\x1b[33m" # yellow
tag = "WARN"
elif levelno >= logging.INFO:
color = "\x1b[32m" # green
tag = "INFO"
elif levelno >= logging.DEBUG:
color = "\x1b[35m" # pink
tag = "DEBUG"
myrecord.msg = f"{color}[{tag}]\x1b[0m {myrecord.msg}"
self.sub_handler.emit(myrecord)
class TqdmLoggingHandler(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level)
def emit(self, record):
try:
msg = self.format(record)
tqdm.tqdm.write(msg)
self.flush()
except Exception:
self.handleError(record)
class MultiProcessingHandler(logging.Handler):
def __init__(self, name, queue):
super().__init__()
self.name = name
self.queue = queue
def _format_record(self, record):
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
self.format(record)
record.exc_info = None
record.msg = f"[{self.name}] {record.msg}"
return record
def emit(self, record):
record = self._format_record(record)
self.queue.put_nowait(record)
def setup_process_logging(log_queue, worker_id):
logger = logging.getLogger("resharder")
for handler in logger.handlers:
logger.removeHandler(handler)
logger.addHandler(MultiProcessingHandler(f"worker {worker_id:03d}", log_queue))
return logger
logger = logging.getLogger("resharder")
logger.setLevel(logging.INFO)
log_handler = logging.StreamHandler()
logger.addHandler(ColoredConsoleHandler(log_handler))
log_handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
# Monkey-patch webdataset to support S3 via aws s3
class ResharderPipe(Pipe):
def wait_for_child(self):
self.status = self.proc.wait()
if self.proc.stderr:
stderr = self.proc.stderr.read().decode()
# Don't pass KeyboardInterrupt through
if stderr and not stderr.endswith("\nKeyboardInterrupt\n"):
msg = stderr.rstrip("\n")
logger.error(f"ResharderPipe captured error: {msg}")
if self.status not in self.ignore_status and not self.ignore_errors:
logger.error(
f"ResharderPipe {self.args}: exit {self.status} (read) {wds.writer.gopen.info}"
)
def __del__(self):
self.stream.close()
self.proc.wait(self.timeout)
def gopen_aws(url, mode="rb", bufsize=8192):
"""Open a URL with `aws s3`.
:param url: url (usually, s3:// etc.)
:param mode: file mode
:param bufsize: buffer size
"""
# TODO not sure about ignore_status
if mode[0] == "r":
cmd = f"aws s3 cp '{url}' -"
return ResharderPipe(
cmd,
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 23],
stderr=subprocess.PIPE,
)
elif mode[0] == "w":
cmd = f"aws s3 cp - '{url}'"
return ResharderPipe(
cmd,
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 26],
stderr=subprocess.PIPE,
)
else:
raise ValueError(f"{mode}: unknown mode")
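# Register the handler so webdataset opens s3:// URLs through `aws s3 cp` pipes.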
wds.gopen_schemes.setdefault("s3", gopen_aws)
class ShardWriter:
"""Like TarWriter but splits into multiple shards."""
def __init__(
self,
namer: Callable,
maxcount: int = 100000,
maxsize: float = 3e9,
post: Optional[Callable] = None,
start_shard: int = 0,
logger: Optional[logging.Logger] = None,
**kw,
):
"""Create a ShardWriter.
:param namer: function mapping shard number to output file name
:param maxcount: maximum number of records per shard (Default value = 100000)
:param maxsize: maximum size of each shard (Default value = 3e9)
:param kw: other options passed to TarWriter
"""
self.verbose = 1
self.kw = kw
self.maxcount = maxcount
self.maxsize = maxsize
self.post = post
self.tarstream = None
self.shard = start_shard
self.namer = namer
self.logger = logger
self.total = 0
self.count = 0
self.size = 0
self.fname = None
def next_stream(self):
"""Close the current stream and move to the next."""
self.finish()
self.fname = self.namer(self.shard)
self.shard += 1
self.tarstream = wds.TarWriter(self.fname, **self.kw)
self.count = 0
self.size = 0
def write(self, obj):
"""Write a sample.
:param obj: sample to be written
"""
if (
self.tarstream is None
or self.count >= self.maxcount
or self.size >= self.maxsize
):
self.next_stream()
try:
size = self.tarstream.write(obj)
self.count += 1
self.total += 1
self.size += size
except Exception:
logger.error(traceback.format_exc())
# outrageous hack to ensure we don't write more to the broken pipe
self.tarstream.tarstream.fileobj.closed = True
self.tarstream = None
self.next_stream()
def finish(self):
"""Finish all writing (use close instead)."""
if self.tarstream is not None:
try:
self.tarstream.close()
except Exception:
logger.error(traceback.format_exc())
assert self.fname is not None
if callable(self.post):
self.post(fname=self.fname, count=self.count, size=self.size)
self.tarstream = None
            if self.logger is not None:
                self.logger.debug(
                    f"wrote {self.fname} {self.size / 1e9:.1f} GB, {self.count}/{self.total}"
                )
def close(self):
"""Close the stream."""
self.finish()
del self.tarstream
del self.shard
del self.count
del self.size
def __enter__(self):
"""Enter context."""
return self
def __exit__(self, *args, **kw):
"""Exit context."""
self.close()
def group_by_keys_nothrow(
data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None
):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext)
:param lcase: convert suffixes to lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
if current_sample is None or prefix != current_sample["__key__"]:
if valid_sample(current_sample):
yield current_sample
current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
        # FIXME: the upstream webdataset version throws if the suffix is already in current_sample,
        # but that can happen in the current LAION-400M dataset when a tar ends with the same prefix
        # the next one begins with: rare, but possible, since prefixes aren't unique across tar files.
if suffix in current_sample:
if handler is not None:
handler(
ValueError(
f"{fname}: duplicate file name in tar file {suffix} {set(current_sample.keys())}"
)
)
if valid_sample(current_sample):
yield current_sample
current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
def tarfile_samples_nothrow(src, handler):
# NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw
try:
streams = url_opener(src, handler=handler)
files = tar_file_expander(streams, handler=handler)
samples = group_by_keys_nothrow(files, handler=handler)
except Exception as exn:
        exn.args = exn.args + (src,)
handler(exn)
return []
return samples
tarfile_to_samples_nothrow = wds.filters.pipelinefilter(tarfile_samples_nothrow)
@dataclass(frozen=True, slots=True)
class Shard:
shard_id: int
data_start: int
size: int
@dataclass
class WorkerTask:
worker_id: int
shards: List[Shard]
parquets: Optional[List[str]]
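# 128-bit sample uids are stored as structured pairs of uint64s (high half, low half); the name refers to 16 bytes.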
u16 = np.dtype("u8,u8")
def ceildiv(a, b):
return -(-a // b)
def path_or_cloudpath(s: str) -> Pathy:
if re.match(r"^\w+://", s):
return CloudPath(s.rstrip("/"))
return Path(s)
def make_argparser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-i",
"--input-dir",
type=path_or_cloudpath,
required=True,
help="input directory containing a webdataset",
)
parser.add_argument(
"-o",
"--output-dir",
type=path_or_cloudpath,
required=True,
help="output directory",
)
parser.add_argument(
"-s",
"--subset-file",
type=path_or_cloudpath,
required=True,
help="subset file, either a NumPy or memmap array of 128 bit hashes",
)
parser.add_argument(
"-n",
"--num-shards",
type=int,
help="number of shards to process (beware of off-by-ones)",
)
parser.add_argument(
"--first-shard",
type=int,
default=0,
help="index of first shard to process",
)
parser.add_argument(
"-j",
"--num-workers",
default=mp.cpu_count(),
type=int,
help="number of workers to use",
)
parser.add_argument(
"--shard-size",
default=10000,
type=int,
help="maximum number of examples per output shard",
)
parser.add_argument(
"--shard-format",
default="{:08d}.tar",
type=str,
help="format for each input shard in str.format syntax",
)
parser.add_argument(
"--output-shard-format",
default="{:08d}.tar",
type=str,
help="format for each output shard in str.format syntax",
)
parser.add_argument(
"--shard-stats-format",
default="{:08d}_stats.json",
type=str,
help="format for each input shard stats file in str.format syntax",
)
parser.add_argument(
"--output-shard-stats-format",
default="{:08d}_stats.json",
type=str,
help="format for each output shard stats file in str.format syntax",
)
parser.add_argument(
"--shard-table",
default="sizes.json",
type=path_or_cloudpath,
help="JSON file recording input shard sizes relative to INPUT_DIR",
)
parser.add_argument(
"--write-shard-table",
action="store_true",
help="write shard table to output_dir if it does not exist",
)
parser.add_argument(
"--shuffle-bufsize", default=0, type=int, help="buffer size for shuffling"
)
parser.add_argument(
"--blur-metadata-map",
type=path_or_cloudpath,
default=None,
help="Map file from shards to parquets for blurring.",
)
parser.add_argument(
"--apply-blur",
action="store_true",
help="Apply blurring to images and re-encode them",
)
parser.add_argument(
"--inject-blur-metadata",
action="store_true",
help="Add blur bounding boxes to the json field of the output examples",
)
parser.add_argument(
"--reencode-webp-quality",
type=str,
default=100,
help="Quality for re-encoding images if necessary.",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="do not make any changes to the output directory",
)
parser.add_argument(
"--overwrite",
action="store_true",
help="overwrite existing files in the output directory",
)
parser.add_argument(
"-v",
"--verbose",
action="append_const",
const=1,
help="decrease the logging level",
)
parser.add_argument(
"-q",
"--quiet",
action="append_const",
const=1,
help="increase the logging level",
)
return parser
parser = make_argparser()
def guess_num_shards(
*,
input_dir: Pathy,
first_shard: int = parser.get_default("first_shard"),
shard_format: str = parser.get_default("shard_format"),
**_,
):
n = 1
def test_size(i):
shard = input_dir / shard_format.format(first_shard + i - 1)
return shard.exists()
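    # Exponentially probe shard indices to bracket the shard count, then binary-search within the bracket.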
for _ in range(40):
if not test_size(n):
break
n *= 2
else:
raise RuntimeError(f"Found too many shards (at least {n})")
if n == 1:
raise RuntimeError("Did not find any shards")
n = (
n // 2
+ bisect.bisect_right(range(n // 2, n), False, key=lambda i: not test_size(i))
- 1
)
return n
def load_shard_size(args):
shard_id, input_dir, shard_format, shard_stats_format = args
size_path = input_dir / shard_stats_format.format(shard_id)
shard_name = shard_format.format(shard_id)
shard_path = input_dir / shard_name
size = None
if size_path.exists() and shard_path.exists():
with size_path.open("r") as f:
size = int(simdjson.Parser().parse(f.read()).get("successes"))
return shard_name, size
def load_shard_metadata(
*,
input_dir: Pathy,
num_shards: int = parser.get_default("num_shards"),
first_shard: int = parser.get_default("first_shard"),
shard_format: str = parser.get_default("shard_format"),
shard_stats_format: str = parser.get_default("shard_stats_format"),
shard_table: Pathy = parser.get_default("shard_table"),
write_shard_table: bool = parser.get_default("write_shard_table"),
num_workers: int = parser.get_default("num_workers"),
**_,
):
shards = []
offset = 0
parser = simdjson.Parser()
table = {}
shard_table_path = input_dir / shard_table
if shard_table_path.exists():
logger.info(f"loading shard table {shard_table_path}")
with open(shard_table_path, "rb") as f:
try:
table = simdjson.load(f)
except ValueError as e:
logger.error(f"shard table parsing error: {e.args[0]}")
logger.info(f"shard table has size {len(table)}")
if not num_shards:
num_shards = guess_num_shards(
input_dir=input_dir,
first_shard=first_shard,
shard_format=shard_format,
)
logger.info(f"binary search found {num_shards} potential shards")
shard_ids = range(first_shard, first_shard + num_shards)
with mp.Pool(num_workers) as pool:
size_iter = pool.imap(
load_shard_size,
(
(
shard_id,
input_dir,
shard_format,
shard_stats_format,
)
for shard_id in tqdm.tqdm(shard_ids, dynamic_ncols=True, smoothing=0)
if shard_format.format(shard_id) not in table
),
chunksize=16,
)
for shard_name, size in size_iter:
if size is not None:
table[shard_name] = size
missing_shards = 0
for shard_id in shard_ids:
shard_name = shard_format.format(shard_id)
if shard_name in table:
size = table[shard_name]
shards.append(Shard(shard_id, offset, size))
offset += size
else:
logger.debug(f"missing shard {shard_name}")
missing_shards += 1
if missing_shards > 0:
logger.warning(
f"{missing_shards} shards were missing; "
"set log level to DEBUG to see list"
)
total_data = shards[-1].data_start + shards[-1].size
logger.info(f"found a total of {len(shards)} shards with {total_data} examples")
if write_shard_table and not shard_table_path.exists():
logger.info("writing shard table")
with shard_table_path.open("w") as f:
simdjson.dump(table, f)
return shards, total_data
def load_subset(*, subset_file: Path, **_):
assert not isinstance(subset_file, CloudPath)
with open(subset_file, "rb") as f:
# Detect the NumPy format magic string
if f.read(6) == b"\x93NUMPY":
subset = np.load(subset_file, mmap_mode="r")
assert subset.dtype == u16
else:
subset = np.memmap(subset_file, u16, mode="r+")
return subset
def load_parquet_metadata(
shards: List[Shard],
/,
blur_metadata_map: Optional[Pathy] = parser.get_default("blur_metadata_map"),
shard_format: str = parser.get_default("shard_format"),
input_dir: Optional[Pathy] = None,
**_,
):
if blur_metadata_map is None:
return None
with blur_metadata_map.open("r") as f:
parquets = simdjson.load(f)
parquet_table = {}
# invert the parquet → shard multi-map
    for parquet_info in parquets.values():
        for shard in parquet_info["shards"]:
            shard_path = path_or_cloudpath(shard)
            if input_dir is not None and shard_path.parent != input_dir:
                continue
            parquet_table[shard_path.name] = parquet_info["parquet"]
parquet_list = []
missing_parquets = 0
for shard in shards:
shard_name = shard_format.format(shard.shard_id)
parquet_list.append(parquet_table.get(shard_name))
if parquet_list[-1] is None:
logger.debug(f"could not find parquet for shard {shard_name}")
missing_parquets += 1
if missing_parquets > 0:
logger.warning(
f"could not find parquets for {missing_parquets} shards; "
"set log level to DEBUG to see list"
)
return parquet_list
def plan_tasks(shards: List[Shard], parquets: Optional[List[str]] = None, /, **args):
num_workers = args["num_workers"]
worker_tasks = []
total_data = shards[-1].data_start + shards[-1].size
# evenly distribute data to workers
data_starts = [shard.data_start for shard in shards]
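    # Chunk boundaries step by ceil(total_data / num_workers); searchsorted maps each boundary to a shard index.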
shard_chunks = [
np.searchsorted(data_starts, i, side="left")
for i in range(0, total_data, -(-total_data // num_workers))
]
shard_chunks.append(len(shards))
for worker_id, (shard_start, shard_end) in enumerate(
zip(shard_chunks, shard_chunks[1:])
):
if shard_start == shard_end:
continue
first_shard, last_shard = shards[shard_start], shards[shard_end - 1]
first_index = first_shard.data_start
last_index = last_shard.data_start + last_shard.size - 1
worker_parquets = (
parquets[shard_start:shard_end] if parquets is not None else None
)
logger.debug(
f"worker {worker_id:03d} will process shards {shard_start} to {shard_end-1}"
)
worker_tasks.append(
WorkerTask(worker_id, shards[shard_start:shard_end], worker_parquets)
)
return worker_tasks
def blur_image(
blurrer: BoundingBoxBlurrer,
jpg: bytes,
blur_bboxes,
reencode_webp_quality: int = parser.get_default("reencode_webp_quality"),
):
img_buf = np.frombuffer(jpg, np.uint8)
decoded = cv2.imdecode(img_buf, cv2.IMREAD_UNCHANGED)
blurred = blurrer(decoded, blur_bboxes)
encoded = cv2.imencode(
".webp",
blurred,
params=[int(cv2.IMWRITE_WEBP_QUALITY), reencode_webp_quality],
)[1].tobytes()
return encoded
def load_blur_bboxes(f):
table = pq.read_table(f, columns=["uid", "face_bboxes"])
table = table.sort_by("uid")
uids = pc.ascii_lpad(table[0], 0x20, "0")
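    # Split each zero-padded 32-hex-character uid into high and low 64-bit halves to match the subset dtype.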
uh = pc.cast(
pc.binary_join_element_wise(
"0x", pc.utf8_slice_codeunits(uids, 0x00, 0x10), ""
),
pa.uint64(),
).to_numpy()
lh = pc.cast(
pc.binary_join_element_wise(
"0x", pc.utf8_slice_codeunits(uids, 0x10, 0x20), ""
),
pa.uint64(),
).to_numpy()
return np.core.records.fromarrays([uh, lh]), table[1]
def copy_worker(
task: WorkerTask,
state: NamespaceProxy,
lock: AcquirerProxy,
log_queue,
*,
input_dir: Pathy,
output_dir: Pathy,
subset_file: Path,
shard_format: str = parser.get_default("shard_format"),
output_shard_format: str = parser.get_default("output_shard_format"),
output_shard_stats_format: str = parser.get_default("output_shard_stats_format"),
shard_size: int = parser.get_default("shard_size"),
shuffle_bufsize: int = parser.get_default("shuffle_bufsize"),
reencode_webp_quality: int = parser.get_default("reencode_webp_quality"),
apply_blur: bool = parser.get_default("apply_blur"),
inject_blur_metadata: bool = parser.get_default("inject_blur_metadata"),
dry_run: bool = parser.get_default("dry_run"),
**_,
):
logger = setup_process_logging(log_queue, task.worker_id)
def log_and_continue(exn):
logger.error(f"webdataset error: {repr(exn)}")
return True
subset = load_subset(subset_file=subset_file)
ds = wds.DataPipeline(
wds.SimpleShardList(
[
str(input_dir / shard_format.format(shard.shard_id))
for shard in task.shards
]
),
tarfile_to_samples_nothrow(handler=log_and_continue),
)
# create shard_name → parquet_name mapping
assert task.parquets is None or len(task.shards) == len(task.parquets)
parquet_table = (
{
shard_format.format(shard.shard_id): parquet
for shard, parquet in zip(task.shards, task.parquets)
}
if task.parquets is not None
else {}
)
@lru_cache(1)
def load_parquet(fname):
try:
logger.debug(f"loading parquet {fname}")
with path_or_cloudpath(fname).open("rb") as f:
return load_blur_bboxes(f)
except FileNotFoundError:
return None
def get_blur_bboxes_for_img(url, uid):
fname = parquet_table.get(path_or_cloudpath(url).name)
if fname is not None:
            parquet = load_parquet(fname)
            if parquet is None:
                logger.error(f"failed to find parquet for {url}")
                return
            uids, bboxes = parquet
            i = np.searchsorted(uids, uid)
            if i >= len(uids) or uids[i] != uid:
logger.error(
f"failed to find blur bboxes for {url}, {uid[0]:016x}{uid[1]:016x}"
)
return
return bboxes[i].as_py()
output_shard_index = None
def output_shard_namer(_shard):
nonlocal output_shard_index
with lock:
output_shard_index = state.output_shard_count
state.output_shard_count += 1
return str(output_dir / output_shard_format.format(output_shard_index))
def output_shard_size_writer(count, **_):
with (output_dir / output_shard_stats_format.format(output_shard_index)).open(
"w"
) as f:
simdjson.dump({"successes": count}, f)
sw = ShardWriter(
output_shard_namer,
maxcount=shard_size,
logger=logger,
post=output_shard_size_writer,
)
sw.verbose = False
total_data = (
task.shards[-1].data_start + task.shards[-1].size - task.shards[0].data_start
)
processed_count, output_count, blur_count, blur_time = 0, 0, 0, 0
def subset_iter():
parser = simdjson.Parser()
blurrer = BoundingBoxBlurrer()
def parse_json_safe(s):
nonlocal parser
try:
return parser.parse(s)
except RuntimeError:
logger.warning("discarding parser due to dangling reference")
# throw away the old parser
parser = simdjson.Parser()
return parser.parse(s)
def process_example(d):
nonlocal processed_count, output_count, blur_count, blur_time
if "json" not in d:
logger.error(
f"missing json for {d['__url__']}/{d['__key__']}, skipping"
)
return
json_parsed = parse_json_safe(d["json"])
key_str = json_parsed.get("uid")
# TODO: is this really the best way to get a u16 scalar?
key_u16 = np.array([divmod(int(key_str, 16), 2**64)], u16)[0]
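            # The subset array is assumed sorted by uid, so the uid's multiplicity is the width of its searchsorted range.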
a = np.searchsorted(subset, key_u16, "left")
b = np.searchsorted(subset, key_u16, "right")
count = b - a
if task.parquets and count > 0:
blur_bboxes = get_blur_bboxes_for_img(d["__url__"], key_u16)
if blur_bboxes is not None and len(blur_bboxes) > 0:
if apply_blur:
blur_start_time = time.perf_counter()
d["webp"] = blur_image(
blurrer, d["jpg"], blur_bboxes, reencode_webp_quality
)
del d["jpg"] # Remove jpg version of image
blur_count += 1
blur_time += time.perf_counter() - blur_start_time
if inject_blur_metadata:
json = json_parsed.as_dict()
json["face_bboxes"] = list(map(list, blur_bboxes))
d["json"] = simdjson.dumps(json).encode()
for j in range(count):
if not dry_run:
yield {**d, "__key__": f"{key_str}-{j}"}
output_count += 1
processed_count += 1
if processed_count % 1000 == 0:
log_queue.put_nowait(1000)
del json_parsed
for input_data in ds:
try:
for output_data in process_example(input_data):
yield output_data
except Exception:
logger.error(traceback.format_exc())
log_queue.put_nowait(processed_count % 1000)
it = subset_iter()
if shuffle_bufsize > 0:
it = wds.filters._shuffle(it, shuffle_bufsize, shuffle_bufsize)
try:
for d in it:
try:
sw.write(d)
except Exception:
logger.error(traceback.format_exc())
try:
sw.close()
except Exception:
logger.error(traceback.format_exc())
if processed_count != total_data:
logger.error(f"expected {total_data} samples but found {processed_count}")
with lock:
state.worker_success += 1
except KeyboardInterrupt:
logger.fatal("Caught KeyboardInterrupt, exiting...")
finally:
with lock:
state.processed_count += processed_count
state.output_count += output_count
state.blur_count += blur_count
state.blur_time += blur_time
def logging_handler(total_data, log_queue):
bar = tqdm.tqdm(total=total_data, dynamic_ncols=True, smoothing=0)
# this feels a bit ad-hoc
tqdm_handler = TqdmLoggingHandler()
tqdm_handler.setFormatter(log_handler.formatter)
handler = ColoredConsoleHandler(tqdm_handler)
while True:
try:
message = log_queue.get(timeout=1)
if message is None:
break
if isinstance(message, int):
bar.update(message)
if isinstance(message, logging.LogRecord):
handler.emit(message)
except queue.Empty:
pass
except:
traceback.print_exc(file=sys.stderr)
raise
bar.close()
def do_tasks(worker_tasks, args):
manager = mp.Manager()
state = manager.Namespace()
state.processed_count = 0
state.output_count = 0
state.blur_count = 0
state.blur_time = 0
state.output_shard_count = 0
state.worker_success = 0
lock = manager.Lock()
log_queue = manager.Queue()
# not very elegant
last_shard = worker_tasks[-1].shards[-1]
total_data = last_shard.data_start + last_shard.size
logging_thread = threading.Thread(
target=logging_handler, args=(total_data, log_queue)
)
processes = [
mp.Process(target=copy_worker, args=(task, state, lock, log_queue), kwargs=args)
for task in worker_tasks
]
logging_thread.start()
for p in processes:
p.start()
try:
for p in processes:
p.join()
except KeyboardInterrupt:
# workers will also receive a KeyboardInterrupt
# so wait for them to terminate on their own
for p in processes:
p.join()
finally:
# send the sentinel value to the thread to tell it to exit
log_queue.put_nowait(None)
logging_thread.join()
if state.worker_success != len(worker_tasks):
logger.error(f"{len(worker_tasks) - state.worker_success} workers failed")
return state
def rmtree_contents(path: Pathy, /, overwrite, num_workers, **_):
files_exist = any(path.iterdir())
if not overwrite and files_exist:
logger.fatal(
"refusing to overwrite non-empty directory; "
"skip this check by passing --overwrite"
)
sys.exit(1)
def remove_file(path):
if path.is_file():
path.unlink()
    if files_exist:
        with mp.Pool(num_workers) as pool:
            # Consume the lazy iterator so every delete actually runs before the pool is torn down.
            for _ in pool.imap_unordered(remove_file, path.iterdir(), chunksize=16):
                pass
def postprocess_output(*, output_dir, shard_format, **_):
logger.info("postprocessing output shards")
for i, shard in enumerate(sorted(output_dir.iterdir())):
shard.rename(output_dir / shard_format.format(i))
def set_loglevel(logger, /, verbose, quiet, **_):
verbose = 0 if verbose is None else sum(verbose)
quiet = 0 if quiet is None else sum(quiet)
log_levels = [
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
]
    logger.setLevel(log_levels[max(min(1 - verbose + quiet, len(log_levels) - 1), 0)])
def make_memory_tmpfile():
shm = Path("/dev/shm")
# file is about to be memory-mapped so using a tmpfs
# saves us a copy if it is not local to begin with
return tempfile.NamedTemporaryFile(
"w+b", prefix="resharder-", **({"dir": shm} if shm.exists() else {})
)
def main(args):
set_loglevel(logger, **vars(args))
logger.info("loading shard metadata")
shards, total_data = load_shard_metadata(**vars(args))
if len(shards) < args.num_workers:
args.num_workers = len(shards)
logger.info("deleting files from output directory")
rmtree_contents(args.output_dir, **vars(args))
if args.apply_blur and not args.blur_metadata_map:
logger.fatal("need to pass --blur-metadata-map to use --apply-blur")
if args.inject_blur_metadata and not args.blur_metadata_map:
logger.fatal("need to pass --blur-metadata-map to use --inject-blur-metadata")
# If blur is needed, retrieve json with metadata parquet locations.
if args.blur_metadata_map is not None:
logger.info("loading parquet metadata")
parquets = load_parquet_metadata(shards, **vars(args))
else:
parquets = None
with make_memory_tmpfile() as f:
if isinstance(args.subset_file, CloudPath):
with args.subset_file.open("rb") as sf:
logger.info("copying remote subset file to local machine")
shutil.copyfileobj(sf, f)
f.seek(0)
args.subset_file = Path(f.name)
if not args.dry_run:
with args.subset_file.open("rb") as sf:
logger.info("copying the subset file to the output directory")
output_filename = args.output_dir / "sample_ids.npy"
with output_filename.open("wb") as of:
shutil.copyfileobj(sf, of)
subset = load_subset(**vars(args))
logger.info(f"selecting a subset of {len(subset)} examples")
worker_tasks = plan_tasks(shards, parquets, **vars(args))
logger.info("starting workers...")
start_time = time.perf_counter()
state = do_tasks(worker_tasks, vars(args))
elapsed_time = time.perf_counter() - start_time
logger.info(
f"processed {state.processed_count} images in {elapsed_time:.3f}s ({state.processed_count/elapsed_time:.2f} images/sec)"
)
if state.processed_count != total_data:
logger.error(
f"expected {total_data} samples but found {state.processed_count}"
)
logger.info(f"output {state.output_count} images")
if state.output_count != len(subset):
logger.warning(
f"{len(subset) - state.output_count} images in the subset were not found in the input!"
)
logger.info(f"wrote {state.output_shard_count} output shards")
if state.blur_count > 0:
logger.info(f"applied blur to {state.blur_count} images")
blur_percent = state.blur_time / (args.num_workers * elapsed_time) * 100
logger.info(
f"spent {state.blur_time:.3f} worker seconds ({blur_percent:0.1f}% of total) blurring images"
)
if not args.dry_run:
with (args.output_dir / "meta.json").open("w") as f:
simdjson.dump(
{
**{k: str(v) for k, v in vars(args).items()},
**vars(state._getvalue()),
"cwd": str(Path.cwd()),
},
f,
)
if __name__ == "__main__":
args = parser.parse_args()
main(args)
|
EXA-1-master
|
exa/datasets/datacomp/resharder.py
|
import argparse
import os
import torch
import shutil
import pickle
import re
import collections
import json
from scale_configs import get_scale_config, available_scales
from pathlib import Path
from cloudpathlib import CloudPath
from training.main import main
from training.distributed import world_info_from_env
def prepare_filename(filename):
filename = str(filename)
if filename.startswith('s3://'):
return f'pipe:aws s3 cp {filename} -'
return filename
def split_filename(pattern, filename):
filename = str(filename)
pattern_match = pattern.search(filename)
pos = pattern_match.start()
return filename[:pos], filename[pos:]
def get_input_shards(data_dir, weights):
# Handle multiple directories
if '::' in str(data_dir):
split_data_dir = str(data_dir).split('::')
data_dirs = [path_or_cloudpath(subdir) for subdir in split_data_dir]
if weights is None:
split_weights = [None for _ in split_data_dir]
else:
split_weights = weights.split('::')
assert len(split_weights) == len(split_data_dir)
input_strs_and_weights = [get_input_shards(subdir, weight) for (subdir, weight) in zip(data_dirs, split_weights)]
input_strs, input_weights = zip(*input_strs_and_weights)
input_strs = '::'.join(input_strs)
if weights is not None:
weights = '::'.join(input_weights)
return input_strs, weights
# Handle raw shards
if data_dir.suffix == '.tar':
return prepare_filename(data_dir), weights
# Handle folders
files_or_subdirs = list(data_dir.iterdir())
data_str_components = []
prefix_map = collections.defaultdict(list)
    pattern = re.compile(r'\d+$')  # Sequence of digits at the end of the string
count_tars = 0
for file_or_subdir in files_or_subdirs:
if file_or_subdir.suffix == '.tar':
shard = file_or_subdir.with_suffix('')
prefix, suffix = split_filename(pattern, shard)
prefix_map[prefix].append(suffix)
count_tars += 1
elif file_or_subdir.is_dir():
# If the folder is generated by the resharder, the metadata file contains how many shards there are.
metadata_file = file_or_subdir / 'meta.json'
if metadata_file.exists():
with open(metadata_file, 'r') as f:
metadata = json.load(f)
shard_count = metadata['output_shard_count']
shard_format = metadata['output_shard_format']
first_shard = shard_format.format(0).replace(".tar", "")
last_shard = shard_format.format(shard_count-1).replace(".tar", "")
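                # Build a webdataset brace-expansion pattern, e.g. {00000000..00000099}.tar (shard names illustrative).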
filename = f'{{{first_shard}..{last_shard}}}.tar'
subfolder_str = prepare_filename(file_or_subdir / filename)
data_str_components.append(subfolder_str)
else:
sub_data_strs, _ = get_input_shards(file_or_subdir, weights)
data_str_components.extend(sub_data_strs.split('::'))
for prefix in sorted(list(prefix_map.keys())):
last_tar = max([int(suffix) for suffix in prefix_map[prefix]])
number_of_zeros = len(prefix_map[prefix][0])
filename = f'{{{0:0{number_of_zeros}d}..{last_tar:0{number_of_zeros}d}}}.tar'
filename = prepare_filename(prefix + filename)
data_str_components.append(filename)
data_str = '::'.join(data_str_components)
if weights is not None:
weights = '::'.join([weights for _ in data_str_components])
return data_str, weights
def path_or_cloudpath(s):
if re.match(r"^\w+://", s):
return CloudPath(s)
return Path(s)
def save_training_artifacts(args, config, checkpoint):
training_artifacts = {
'scale': args.scale,
'checkpoint': checkpoint,
'scale_config': config,
'data_dir': args.data_dir
}
artifacts_fname = checkpoint.parent.parent / 'info.pkl'
pickle.dump(training_artifacts, open(artifacts_fname, 'wb'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--scale',
type=str,
required=True,
choices=available_scales(),
help='Competition scale.'
)
parser.add_argument(
'--data_dir',
type=path_or_cloudpath,
required=True,
help='Path to directory where the data is stored. Multiple paths can be used, separated by "::".'
)
parser.add_argument(
"--data_weights",
type=str,
default=None,
help=(
"When using multiple data sources with webdataset and sampling with replacement, which weight to use for sampling the different data sources. "
"Similar to --data-dir, this should be a string with as many numbers as there are data sources, separated by `::` (e.g. 1::2::0.5) "
"By default, datapoints are sampled uniformly regardless of the dataset sizes."
)
)
parser.add_argument(
'--output_dir',
type=path_or_cloudpath,
required=True,
help='Path to directory where outputs will be stored.'
)
parser.add_argument(
'--exp_name',
type=str,
default=None,
help='Name of the experiment for logging.'
)
parser.add_argument(
'--use_cached_shards',
help='If true, re-use the re-sharded data if possible.',
action='store_true',
default=False
)
parser.add_argument(
'--wandb_project_name',
type=str,
default='datanet',
help='Name of the project if logging with wandb.'
)
parser.add_argument(
'--workers',
type=int,
default=4,
help='Number of workers for open_clip.'
)
parser.add_argument(
'--precision',
type=str, choices=["amp", "amp_bf16", "amp_bfloat16", "bf16", "fp16", "fp32"],
default="amp",
help="Floating point precision."
)
parser.add_argument(
'--num_checkpoints',
type=int,
default=5,
help="Number of times we save checkpoints during training."
)
parser.add_argument(
'--seed',
type=int,
default=0,
help="Random seed."
)
parser.add_argument(
"--dataset_resampled",
default=False,
action="store_true",
help="Whether to use sampling with replacement for webdataset shard selection."
)
parser.add_argument(
"--report_to_wandb",
default=False,
action="store_true",
help="If True, report to wandb."
)
parser.add_argument(
"--accum_freq",
type=int,
default=1,
help="Update the model every --acum-freq steps."
)
parser.add_argument(
"--log_every_n_steps",
type=int,
default=100,
help="Log every n steps to tensorboard/console/wandb.",
)
parser.add_argument(
"--resume",
default='latest',
type=str,
help="Path to checkpoint to resume from (default: latest checkpoint in the training directory).",
)
parser.add_argument(
"--imagenet_val",
type=str,
default=None,
help="Optional path to imagenet val set for conducting zero shot evaluation.",
)
parser.add_argument(
"--blur_field",
type=str,
default=None,
help="Name of the field in the webdataset json files with bounding boxes to blur."
)
parser.add_argument(
"--grad_clip_norm",
type=float,
default=None
)
parser.add_argument(
"--save_frequency",
type=int,
default=0
)
args = parser.parse_args()
data_dir = args.data_dir
_, rank, world_size = world_info_from_env()
if rank == 0:
print('Running training on scale', args.scale)
print(f'World size is {world_size}.')
config = get_scale_config(args.scale)
learning_rate = config['learning_rate']
global_batch_size = config['batch_size']
warmup = config['warmup']
model = config['model']
beta2 = config['beta2']
train_num_samples = config['train_num_samples']
train_data, weights = get_input_shards(data_dir, args.data_weights)
exp_name = args.exp_name if args.exp_name else f'{args.scale}_scale'
log_dir = args.output_dir
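    # The scale config fixes the global batch size; each GPU sees global_batch_size / (world_size * accum_freq).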
per_gpu_batch_size = global_batch_size // (world_size * args.accum_freq)
main_args = [
'--save-frequency', f'{args.save_frequency}',
'--ddp-static-graph',
'--local-loss',
'--gather-with-grad',
'--grad-checkpointing',
'--train-data', f'{train_data}',
'--train-num-samples', f'{train_num_samples // args.num_checkpoints}',
'--warmup', f'{warmup}',
'--dataset-type', 'webdataset',
'--precision', f'{args.precision}',
'--workers', f'{args.workers}',
'--model', f'{model}',
'--batch-size', f'{per_gpu_batch_size}',
'--epochs', f'{args.num_checkpoints}',
'--lr', f'{learning_rate}',
'--logs', f'{log_dir}',
'--name', f'{exp_name}',
'--seed', f'{args.seed}',
'--accum-freq', f'{args.accum_freq}',
'--log-every-n-steps', f'{args.log_every_n_steps}',
'--save-most-recent',
'--resume', f'{args.resume}'
]
if args.dataset_resampled:
main_args.append('--dataset-resampled')
if args.report_to_wandb:
main_args.extend(['--report-to', 'wandb', '--wandb-project-name', f'{args.wandb_project_name}'])
if args.imagenet_val is not None:
main_args.extend(['--imagenet-val', args.imagenet_val])
if args.blur_field is not None:
main_args.extend(['--blur-field', args.blur_field])
if beta2 is not None:
main_args.extend(['--beta2', f'{beta2}'])
if weights is not None:
main_args.extend(['--train-data-upsampling-factors', weights])
if args.grad_clip_norm is not None:
main_args.extend(['--grad-clip-norm', f'{args.grad_clip_norm}'])
success = main(main_args)
if rank == 0:
if success == -1:
print('Error running training. Exiting.')
        final_checkpoint = log_dir / exp_name / 'checkpoints' / 'epoch_latest.pt'
assert final_checkpoint.exists(), f'Did not find the checkpoint at {final_checkpoint}'
save_training_artifacts(args, config, final_checkpoint)
print('Done training.')
|
EXA-1-master
|
exa/datasets/datacomp/train.py
|
import argparse
import json
import os
import copy
import pickle
import requests
import yaml
import warnings
import shutil
import re
import time
from time import gmtime, strftime
import numpy as np
from requests.structures import CaseInsensitiveDict
from huggingface_hub import Repository
from huggingface_hub import upload_file, dataset_info, delete_folder, HfApi, CommitOperationAdd
from scale_configs import get_scale_config, available_scales
from pathlib import Path
from cloudpathlib import CloudPath
from eval_utils.main import evaluate_model
warnings.filterwarnings("ignore", message="Length of IterableDataset")
def path_or_cloudpath(s):
if re.match(r"^\w+://", s):
return CloudPath(s)
return Path(s)
def submit_to_firebase(training_info, args, results):
timestamp = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
data = {
'scale': training_info['scale'],
'model': training_info['scale_config']['model'],
'dataset_size': args.dataset_size,
'checkpoint': str(training_info['checkpoint']),
'batch_size': training_info['scale_config']['batch_size'],
'learning_rate': training_info['scale_config']['learning_rate'],
'train_num_samples': training_info['scale_config']['train_num_samples'],
'method_name': args.method_name,
'author': args.author,
'email': args.email,
'hf_username': args.hf_username,
'hf_repo_name': args.hf_repo_name,
'timestamp': timestamp
}
for dataset_name, dataset_results in results.items():
if 'main_metric' in dataset_results['metrics']:
metric = dataset_results['metrics']['main_metric']
if metric is not None:
data[dataset_name] = metric
hf_hub_username = data['hf_username']
hf_hub_dirname = data['hf_repo_name']
key = f'{hf_hub_username}__{hf_hub_dirname}__{timestamp}'
url = f"https://laion-tng-default-rtdb.firebaseio.com/{key}.json"
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
json_data = json.dumps(data)
resp = requests.put(url, headers=headers, data=json_data)
return resp
def submit_to_slack(train_info, args, results):
scale = train_info.get('scale', 'undefined')
hf_hub_username = args.hf_username
hf_hub_dirname = args.hf_repo_name
hf_url = f'https://huggingface.co/{hf_hub_username}/{hf_hub_dirname}'
avg_acc = np.mean([
val['metrics']['main_metric']
for val in results.values()
if val['metrics']['main_metric'] is not None
])
imagenet_acc = results['ImageNet 1k']['metrics']['acc1']
message = (
f'New submission ({scale} scale): {args.method_name}. '
f'ImageNet accuracy: {imagenet_acc:.3f}. Average performance {avg_acc:.3f}. '
f'From {args.author} ({args.email}).'
)
if not args.skip_hf:
message = message[:-1] + f', more details at {hf_url}'
root = 'hooks.slack.com'
part1 = 'T01AEJ66KHV'
part2 = 'B055EQE8U8N'
part3 = 'mgVJURCYuDirvkvyZ8wkuDwg'
url = f"https://{root}/services/{part1}/{part2}/{part3}"
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
data = json.dumps({'text': message})
resp = requests.put(url, headers=headers, data=data)
return resp
def push_files_to_hub(train_info, args, results_filename):
if '::' in str(args.samples):
sample_files = [path_or_cloudpath(subdir) for subdir in str(args.samples).split('::')]
else:
sample_files = [args.samples]
if len(sample_files) == 0:
raise FileNotFoundError(
f'Expected one or more files containing the sample ids but found none.'
)
hf_api = HfApi()
repo_id = args.hf_username + '/' + args.hf_repo_name
print(f'Pushing files to HF Hub ({repo_id}). This may take a while.')
results_filename = str(results_filename)
scale = train_info['scale']
prefix = f'{scale}_scale'
operations = [
CommitOperationAdd(path_or_fileobj=results_filename, path_in_repo=f'{prefix}/results.jsonl'),
]
if args.upload_checkpoint:
model_checkpoint = str(train_info['checkpoint'])
operations.append(CommitOperationAdd(path_or_fileobj=model_checkpoint, path_in_repo=f'{prefix}/checkpoint.pt'))
for filename in sample_files:
fileobj = filename.read_bytes()
operations.append(
CommitOperationAdd(path_or_fileobj=fileobj, path_in_repo=f'{prefix}/samples/{filename.name}')
)
hf_api.create_commit(
repo_id=repo_id,
operations=operations,
commit_message=f'Upload artifacts ({scale} scale)'
)
print('Done uploading files to HF Hub.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--track', type=str, required=False, choices=['filtering', 'byod'], help='Competition track.')
parser.add_argument('--train_output_dir', required=True, help='Path to output directory from training.')
parser.add_argument('--output_dir', default=None, help='Path to output directory to use for evaluation. If nothing is passed, use the training output dir.')
parser.add_argument('--data_dir', help='(Optional) Path to directory containing downloaded evaluation datasets.', default=None)
parser.add_argument('--batch_size', default=64, type=int, help="Batch size.")
# Submission flags
parser_submit = parser.add_argument_group('submission')
parser_submit.add_argument('--submit', help='If true, submit the entry to the leaderboard.', action='store_true', default=False)
parser_submit.add_argument('--method_name', type=str, help='Name of the method to be shown on the leaderboard. This *will* be shared publicly.', default=None)
parser_submit.add_argument('--author', type=str, help='Name or names of the authors of this submission. This *will* be shared publicly.', default=None)
parser_submit.add_argument('--email', type=str, help='Email for contact. This will *not* be shared publicly', default=None)
parser_submit.add_argument('--hf_username', type=str, help='HuggingFace username. This will *not* be shared publicly', default=None)
parser_submit.add_argument('--hf_repo_name', type=str, help='HuggingFace repository name. This will *not* be shared publicly', default=None)
parser_submit.add_argument('--dataset-size', type=str, default='', help='Optional size of the dataset.')
parser_submit.add_argument('--samples', type=path_or_cloudpath, help='Optional path to file(s) specifying the samples used for training. This must be specified.', default=None)
parser_submit.add_argument('--upload_checkpoint', help='Whether or not to upload the checkpoint with the trained model', action='store_true', default=False)
# Debug-only flags. Using any of these might invalidate your submission.
parser_debug = parser.add_argument_group('debug-only')
parser_debug.add_argument('--use_model', type=str, help='If set, manually specify a model architecture and checkpoint path ("model path")', default=None)
    parser_debug.add_argument('--skip_hf', help='If true, skip uploading files to HF Hub', action='store_true', default=False)
    parser_debug.add_argument('--skip_db', help='If true, skip uploading information to the database', action='store_true', default=False)
    parser_debug.add_argument('--skip_notification', help='If true, skip notifying us about your submission', action='store_true', default=False)
args = parser.parse_args()
args.train_output_dir = Path(args.train_output_dir)
if args.output_dir is None:
args.output_dir = args.train_output_dir
args.output_dir = Path(args.output_dir)
if args.use_model is not None:
args.train_output_dir = args.output_dir
# Generate barebones info.pkl
model_arch, model_checkpoint = args.use_model.split(maxsplit=1)
Path.mkdir(args.output_dir, parents=True, exist_ok=True)
with open(args.train_output_dir / 'info.pkl', 'wb') as f:
pickle.dump({
'scale_config': {'model': model_arch},
'checkpoint': model_checkpoint
}, f)
if args.submit:
assert args.method_name is not None, 'Please specify your method name with --method_name for a valid submission.'
assert args.author is not None, 'Please specify your author name with --author for a valid submission.'
assert args.email is not None, 'Please specify your email with --email for a valid submission.'
        assert args.hf_username is not None, 'Please specify your huggingface username with --hf_username for a valid submission.'
assert args.hf_repo_name is not None, 'Please specify your huggingface repo name with --hf_repo_name for a valid submission.'
# Read training information
train_info_filename = args.train_output_dir / 'info.pkl'
train_info = pickle.load(open(train_info_filename, 'rb'))
results_filename = args.output_dir / 'eval_results.jsonl'
# Get list of datasets
with open(os.path.join(os.path.dirname(__file__), 'tasklist.yml')) as f:
tasks = yaml.safe_load(f)
if not args.submit:
tasks = {key: val for key, val in tasks.items() if 'val_task' in val['tags']}
# Check for cached results
results = {}
cached_train_info_filename = args.output_dir / 'info.pkl'
if args.output_dir.exists() and cached_train_info_filename.exists():
# If the output directory already exists, the training information should match.
cached_train_info = pickle.load(open(cached_train_info_filename, 'rb'))
error_message = (
'Error: output directory exists, but the training configs do not match. '
'If you are re-using an output directory for evals, please be sure that '
'the training output directory is consistent.')
assert cached_train_info == train_info, error_message
# Read existing results
if results_filename.exists():
with open(results_filename, 'r') as f:
lines = [json.loads(s) for s in f.readlines()]
for line in lines:
if line['key'] not in tasks:
continue
results[line['dataset']] = line
print(f'Found {len(results)} eval result(s) in {results_filename}.')
else:
Path.mkdir(args.output_dir, parents=True, exist_ok=True)
pickle.dump(train_info, open(cached_train_info_filename, 'wb'))
train_checkpoint = Path(train_info['checkpoint'])
try:
exists = Path(train_info['checkpoint']).exists()
    except Exception:
exists = False
if not exists and args.use_model is None:
print('Warning, did not find or could not read checkpoint at', train_info['checkpoint'])
default_checkpoint_name = args.train_output_dir / 'checkpoints' / 'epoch_latest.pt'
print('Defaulting to', default_checkpoint_name)
train_info['checkpoint'] = default_checkpoint_name
print('Evaluating')
starttime = int(time.time())
for task_key in tasks:
task_name = tasks[task_key].get('name', task_key)
if task_name in results:
print(f'Skipping {task_name} since results are already in {results_filename}')
else:
print(f'Evaluating on {task_name}')
metrics = evaluate_model(
task_key,
train_info,
args.data_dir,
tasks[task_key].get("size"),
batch_size=args.batch_size
)
metrics['main_metric'] = metrics.get(tasks[task_key].get("main_metric", "acc1"))
results[task_name] = {
'key': task_key,
'dataset': task_name,
'metrics': metrics,
}
with open(results_filename, 'a+') as f:
f.write(json.dumps(results[task_name])+'\n')
if results[task_name]['metrics']['main_metric'] is not None:
print(f"Score: {results[task_name]['metrics']['main_metric']:.4f}")
else:
print(f"Score: No summary metric")
elapsed = int(time.time()) - starttime
print(f"Evaluation time: {elapsed // 3600} hour(s) {elapsed % 3600 // 60} minute(s) {elapsed % 60} second(s)")
print()
print("=== Final results ===")
for line in results.values():
print(f"{line['dataset']}: {line['metrics']['main_metric']}")
if args.submit:
print("=====================")
average = np.mean([
val['metrics']['main_metric']
for val in results.values()
if val['metrics']['main_metric'] is not None
])
print(f"Average: {average}")
if args.submit:
print('Done with evaluations. Preparing your submission...')
# Push models, results to HF Hub
if not args.skip_hf:
push_files_to_hub(train_info, args, results_filename)
error_msg = """
Error: something went wrong when submitting your results.
Please check if your HF credentials are correct, and contact the team if errors persist.
"""
error_msg = '='*100 + '\n' + error_msg + '\n' + '='*100
# Submit jsonl to firebase
if not args.skip_db:
resp = submit_to_firebase(train_info, args, results)
if resp.status_code != 200:
print(error_msg)
import sys; sys.exit()
# Slack notification
if not args.skip_notification:
resp = submit_to_slack(train_info, args, results)
if resp.status_code != 200:
print(error_msg)
import sys; sys.exit()
        print('Successfully submitted your results. Thanks for participating, and good luck!')
|
EXA-1-master
|
exa/datasets/datacomp/evaluate.py
|
import argparse
import os
import sys
import yaml
VERBOSE = False
def main(args):
global VERBOSE
VERBOSE = args.verbose
download_datasets(args.data_dir)
def wget(src, dst, verbose=False):
vflag = "v" if VERBOSE or verbose else "nv"
os.system(f"wget -{vflag} '{src}' -O '{dst}'")
def download_datasets(data_dir):
local_urls = []
# Get list of datasets
with open("tasklist.yml") as f:
tasks = yaml.safe_load(f)
for task, task_info in tasks.items():
task_name = task_info.get('name', task)
if task.startswith("fairness/") or task.startswith("retrieval/") or task.startswith("misc/"):
task = task.split("/", 1)[1]
dir_name = f"wds_{task.replace('/', '-')}_test"
source_url = f"https://huggingface.co/datasets/djghosh/{dir_name}"
target_path = os.path.join(data_dir, dir_name)
try:
print()
print(f"""{f" Download '{task_name}' ":=^40s}""")
print()
# Create directory
os.makedirs(os.path.join(target_path, "test"), exist_ok=True)
# Download metadata
wget(
os.path.join(source_url, "raw/main/classnames.txt"),
os.path.join(target_path, "classnames.txt")
)
wget(
os.path.join(source_url, "raw/main/zeroshot_classification_templates.txt"),
os.path.join(target_path, "zeroshot_classification_templates.txt")
)
wget(
os.path.join(source_url, "raw/main/test/nshards.txt"),
os.path.join(target_path, "test/nshards.txt")
)
# Get nshards
with open(os.path.join(target_path, "test/nshards.txt")) as f:
nshards = int(f.read())
local_urls.append(os.path.join(target_path, f"test/{{0..{nshards-1}}}.tar"))
# Check and optionally download TARs
for index in range(nshards):
local_tar_path = os.path.join(target_path, f"test/{index}.tar")
if os.path.exists(local_tar_path):
# Check existing TAR
# Get expected size and checksum
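                        # The 'raw' URL serves the git-lfs pointer file; its 'oid' and 'size' fields give the expected sha256 and byte count.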
with os.popen(f"curl -s '{os.path.join(source_url, f'raw/main/test/{index}.tar')}'") as tar_output:
tar_info = dict([line.split(maxsplit=1) for line in tar_output.read().splitlines()])
exp_checksum = tar_info['oid'].split(":")[1]
exp_size = int(tar_info['size'])
# Compute true size and checksum
with os.popen(f"sha256sum '{local_tar_path}'") as sha_output:
true_checksum = sha_output.read().split()[0]
true_size = os.path.getsize(local_tar_path)
# If equal, skip
if true_checksum == exp_checksum and true_size == exp_size:
print(f"Verified test/{index}.tar")
continue
# TAR is corrupt or does not exist, download
wget(
os.path.join(source_url, f"resolve/main/test/{index}.tar"),
local_tar_path,
verbose=True
)
print("Successfully downloaded dataset")
except Exception as e:
print("Failed to download dataset, check write permissions and Internet connection", file=sys.stderr)
print(e)
print()
# Print all local URLs
print("Paths to all downloaded TAR files:")
print(*local_urls, sep="\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download all data comp evaluation datasets")
parser.add_argument("data_dir", help="Root directory into which all datasets will be downloaded")
parser.add_argument("--verbose", "-v", action="store_true", help="Print verbose download status")
args = parser.parse_args()
sys.exit(main(args))
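# Usage sketch (the target directory below is illustrative, not part of the repo):
#   python download_evalsets.py /path/to/eval_datasets -v
# Each task listed in tasklist.yml is mirrored into wds_<task>_test/, and existing
# TAR shards are re-downloaded only if their size or sha256 no longer matches the
# LFS pointer fetched from the corresponding HuggingFace repo.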
|
EXA-1-master
|
exa/datasets/datacomp/download_evalsets.py
|
SCALE_CONFIGS = {
"debug": {
'batch_size': 1024,
'learning_rate': 1e-4,
'train_num_samples': 128_000,
'warmup': 500,
'model': 'ViT-B-32',
'beta2': None
},
"small": {
'batch_size': 4096,
'learning_rate': 5e-4,
'train_num_samples': 12_800_000,
'warmup': 500,
'model': 'ViT-B-32',
'beta2': None,
},
"medium": {
'batch_size': 4096,
'learning_rate': 5e-4,
'train_num_samples': 128_000_000,
'warmup': 500,
'model': 'ViT-B-32',
'beta2': None
},
"large": {
'batch_size': 8192,
'learning_rate': 5e-4,
'train_num_samples': 1_280_000_000,
'warmup': 500,
'model': 'ViT-B-16',
'beta2': None
},
"xlarge": {
'batch_size': 90112,
'learning_rate': 1e-3,
'train_num_samples': 12_800_000_000,
'warmup': 10000,
'model': 'ViT-L-14',
'beta2': 0.95
}
}
SIMPLE_NAMES = ['debug', 'small', 'medium', 'large', 'xlarge']
def available_scales(simple_names = False):
if simple_names:
return SIMPLE_NAMES
else:
return sorted(list(SCALE_CONFIGS.keys()))
def get_scale_config(scale):
if scale not in SCALE_CONFIGS:
raise ValueError(f'Unknown scale: {scale}. Please use one of {available_scales()}')
return SCALE_CONFIGS[scale]
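# Usage sketch: look up the training hyperparameters for a competition scale.
#   config = get_scale_config("medium")
#   config["model"]       # 'ViT-B-32'
#   config["batch_size"]  # 4096
# Passing an unknown scale raises ValueError and lists available_scales().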
|
EXA-1-master
|
exa/datasets/datacomp/scale_configs.py
|
import argparse
import json
import pandas as pd
FLOAT_FMT = lambda frac: f"{100 * frac:.1f}"
def generate_tables(metrics, dataset):
if dataset == "fairness/fairface":
RACES = ['black', 'white', 'indian', 'latino', 'middle eastern', 'southeast asian', 'east asian']
elif dataset == "fairness/utkface":
RACES = ['black', 'white', 'indian', 'asian', 'other']
else:
raise ValueError("dataset not recognized")
# Table 3+4, percent accuracy on Race, Gender and Age, comparing White vs. Non-white
print("# Table 3+4")
table34 = pd.DataFrame({
objective: [
metrics[f'acc_{objective}_{label}']
for label in ['race_binary:0', 'race_binary:1', 'avg']
]
for objective in ['race', 'gender', 'age']
}, index=['white', 'non-white', 'overall'])
print(table34.to_string(float_format=FLOAT_FMT))
print()
# Table 5, gender classification on intersectional race and gender categories
print("# Table 5")
table5 = pd.DataFrame(
[
[
metrics[f'acc_gender_x_race:{race_label}_gender:{gender_label}']
for race_label in range(len(RACES))
]
for gender_label in range(2)
], index=[
'male', 'female'
], columns=RACES
)
print(table5.to_string(float_format=FLOAT_FMT))
print()
# Table 6, toxic misclassification by race
print("# Table 6")
table6 = pd.DataFrame(
[
[
metrics[f'toxicity_{toxic_label}_race:{race_label}']
for race_label in range(len(RACES))
]
for toxic_label in ['crime', 'nonhuman']
], index=[
'crime-related', 'non-human'
], columns=RACES
)
print(table6.to_string(float_format=FLOAT_FMT))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("results_file", type=str, help="path to eval_results.jsonl")
parser.add_argument("--dataset", type=str, default="fairness/fairface", help="dataset to use")
args = parser.parse_args()
with open(args.results_file) as f:
for line in f:
results = json.loads(line)
if results['key'] == args.dataset:
metrics = results['metrics']
generate_tables(metrics, args.dataset)
break
else:
print("N/A")
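# Usage sketch (the results path is illustrative):
#   python fairness_tables.py /path/to/eval_results.jsonl --dataset fairness/fairface
# The script prints Tables 3+4, 5 and 6 for the first line in the results file whose
# key matches the requested dataset, or "N/A" if no such line exists.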
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/fairness_tables.py
|
"""Evaluate on standard classification webdatasets."""
import os
import torch
import open_clip
from sklearn.metrics import balanced_accuracy_score
from clip_benchmark.datasets.builder import build_dataset
from clip_benchmark.metrics import zeroshot_classification as zsc
def create_model(model_arch, model_path):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(0)
model_path = str(model_path)
model, _, transform = open_clip.create_model_and_transforms(model_arch, pretrained=model_path)
model.eval()
# model.half()
model = model.to(device)
return model, transform, device
def create_webdataset(task, transform, data_root=None, dataset_len=None, batch_size=64, num_workers=4):
data_folder = f"wds_{task.replace('/','-')}_test"
if data_root is None:
data_root = f'https://huggingface.co/datasets/djghosh/{data_folder}/tree/main'
else:
data_root = os.path.join(data_root, data_folder)
dataset = build_dataset(
dataset_name=f"wds/{task}",
root=data_root,
transform=transform,
split="test",
download=False,
)
if dataset_len:
dataset = dataset.with_length((dataset_len + batch_size - 1) // batch_size)
dataloader = torch.utils.data.DataLoader(
dataset.batched(batch_size), batch_size=None,
shuffle=False, num_workers=num_workers,
)
return dataset, dataloader
def evaluate_webdataset(
task, model_arch, model_path, data_root=None,
dataset_len=None, batch_size=64, num_workers=4,
return_preds=False, return_topk=False):
"""Evaluate CLIP model on classification task."""
# Create model
model, transform, device = create_model(model_arch, model_path)
# Load data
dataset, dataloader = create_webdataset(
task, transform, data_root,
dataset_len, batch_size, num_workers
)
zeroshot_templates = dataset.templates if hasattr(dataset, 'templates') else None
classnames = dataset.classes if hasattr(dataset, 'classes') else None
assert (zeroshot_templates is not None and classnames is not None), 'Dataset does not support classification'
# Evaluate
classifier = zsc.zero_shot_classifier(
model,
open_clip.get_tokenizer(model_arch),
classnames,
zeroshot_templates,
device
)
logits, target = zsc.run_classification(model, classifier, dataloader, device, amp=False)
with torch.no_grad():
pred = logits.argmax(axis=1).cpu()
target = target.cpu()
# Compute metrics
if len(dataset.classes) >= 5:
acc1, acc5 = zsc.accuracy(logits, target, topk=(1, 5))
else:
acc1, = zsc.accuracy(logits, target, topk=(1,))
acc5 = None
mean_per_class_recall = balanced_accuracy_score(target, pred)
metrics = {
"acc1": acc1,
"acc5": acc5,
"mean_per_class_recall": mean_per_class_recall
}
if return_preds:
if return_topk:
with torch.no_grad():
_, topk_pred = torch.topk(logits, int(return_topk), dim=1)
topk_pred = topk_pred.cpu()
return metrics, topk_pred, target
return metrics, pred, target
return metrics
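# Usage sketch; the task name and checkpoint path below are placeholders, not
# guaranteed to exist in a given setup:
#   metrics = evaluate_webdataset(
#       "imagenet1k", "ViT-B-32", "/path/to/epoch_latest.pt",
#       data_root="/path/to/eval_datasets", batch_size=64,
#   )
#   # -> {"acc1": ..., "acc5": ..., "mean_per_class_recall": ...}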
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/wds_eval.py
|
# from collections import Counter
from sklearn.metrics import jaccard_score
import numpy as np
from tqdm import tqdm
import torch
import open_clip
import datasets
# from transformers import CLIPModel, CLIPProcessor
from .wds_eval import create_model
class WinoDataset(torch.utils.data.Dataset):
def __init__(self, hf_dataset, transform=None, text_transform=None):
super().__init__()
self._dataset = hf_dataset
self.transform = (lambda x: x) if transform is None else transform
self.text_transform = (lambda x: x) if text_transform is None else text_transform
def __len__(self):
return len(self._dataset)
def __getitem__(self, index: int):
example = self._dataset[index]
return (
self.transform(example['candidate_images']),
self.text_transform(example['cue']),
np.isin(example['candidates'], example['associations'])
)
def evaluate_winogavil_dataset(
model_arch, model_path, data_root=None,
num_workers=4, batch_size=None):
model, transform, device = create_model(model_arch, model_path)
tokenizer = open_clip.get_tokenizer(model_arch)
# Load data
dataset = WinoDataset(
datasets.load_dataset(
"nlphuji/winogavil",
split="test",
# cache_dir=data_root
),
transform=lambda imgs: torch.stack([transform(img) for img in imgs]),
text_transform=lambda text: tokenizer([get_clip_prompt(text)])
)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=1,
shuffle=False, num_workers=num_workers,
collate_fn=lambda batch: batch[0]
)
all_groups = []
all_scores = []
# Iterate WinoGAViL Instances
for idx, (images, text, y_true) in enumerate(tqdm(dataloader)):
# Get example
n_images = len(images)
n_assoc = y_true.sum()
# Featurize
with torch.no_grad(), torch.cuda.amp.autocast():
image_features = model.encode_image(images.to(device), normalize=True)
text_features = model.encode_text(text.to(device), normalize=True)
# Compute similarities
image_logits = (text_features @ image_features.T).squeeze(0).cpu().numpy()
# Select topk
topk_indices = np.argsort(image_logits)[-n_assoc:]
y_pred = np.isin(np.arange(n_images), topk_indices)
# Evaluate with Jaccard
score = jaccard_score(y_true, y_pred)
all_scores.append(score)
all_groups.append(n_images)
if idx > 0 and idx % 100 == 0:
print(f"idx: {idx}, current Jaccard index average: {np.mean(all_scores)}")
all_groups = np.array(all_groups)
all_scores = np.array(all_scores)
return {
"avg_jaccard_score": all_scores.mean(),
"jaccard_score_5": all_scores[all_groups == 5].mean(),
"jaccard_score_6": all_scores[all_groups == 6].mean(),
"jaccard_score_10": all_scores[all_groups == 10].mean(),
"jaccard_score_12": all_scores[all_groups == 12].mean(),
"jaccard_score_5-6": all_scores[all_groups <= 6].mean(),
"jaccard_score_10-12": all_scores[all_groups >= 10].mean(),
}
# def solve_winogavil_instance(clip_model, clip_processor, cue, num_associations, candidates, candidates_images):
# clip_text = get_clip_txt(cue)
# sim_for_image = {}
# for img_name, img in zip(candidates, candidates_images):
# processed_cue_img = clip_processor(text=[clip_text], images=img, return_tensors="pt")
# output_cue_img = clip_model(**processed_cue_img).logits_per_image.item()
# sim_for_image[img_name] = output_cue_img
# sorted_sim_for_image = Counter(sim_for_image).most_common()[:num_associations]
# clip_predictions = [x[0] for x in sorted_sim_for_image]
# return clip_predictions
def get_clip_prompt(item):
item = item.lower()
vowels = ["a", "e", "i", "o", "u"]
if item[0] in vowels:
clip_txt = f"An {item}"
else:
clip_txt = f"A {item}"
return clip_txt
# def get_vectors_similarity(v1, v2):
# similarity = v1.detach().numpy() @ v2.detach().numpy().T
# similarity_item = similarity.item()
# return similarity_item
# def get_jaccard(s1, s2):
# s1 = set(s1)
# s2 = set(s2)
# jaccard = int(len(s1.intersection(s2)) / len(s1.union(s2)) * 100)
# return jaccard
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/wino_eval.py
|
# Main branching point for evaluating on different datasets
from .wds_eval import evaluate_webdataset
from .retr_eval import evaluate_retrieval_dataset
from .wilds_eval import evaluate_wilds_dataset
from .fairness_eval import evaluate_dollar_street_dataset, evaluate_geode_dataset, evaluate_fairface_dataset
from .wino_eval import evaluate_winogavil_dataset
def evaluate_model(task_key, train_info, data_root, dataset_size, batch_size=64):
if task_key.startswith("retrieval/"):
metrics = evaluate_retrieval_dataset(
task_key,
train_info['scale_config']['model'],
train_info['checkpoint'],
data_root=data_root,
batch_size=batch_size,
)
elif task_key.startswith("wilds/"):
metrics = evaluate_wilds_dataset(
task_key,
train_info['scale_config']['model'],
train_info['checkpoint'],
data_root=data_root,
dataset_len=dataset_size,
batch_size=batch_size
)
elif task_key.startswith("fairness/"):
eval_fn = {
"fairness/dollar_street": evaluate_dollar_street_dataset,
"fairness/geode": evaluate_geode_dataset,
"fairness/fairface": evaluate_fairface_dataset,
"fairness/utkface": evaluate_fairface_dataset,
}.get(task_key)
if eval_fn is not None:
metrics = eval_fn(
task_key,
train_info['scale_config']['model'],
train_info['checkpoint'],
data_root=data_root,
dataset_len=dataset_size,
batch_size=batch_size,
)
else:
metrics = {}
elif task_key.startswith("misc/"):
if task_key == "misc/winogavil":
metrics = evaluate_winogavil_dataset(
train_info['scale_config']['model'],
train_info['checkpoint'],
data_root=data_root,
batch_size=batch_size,
)
else:
metrics = {}
else:
metrics = evaluate_webdataset(
task_key,
train_info['scale_config']['model'],
train_info['checkpoint'],
data_root=data_root,
dataset_len=dataset_size,
batch_size=batch_size
)
return metrics
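# Usage sketch showing the train_info fields this dispatcher reads
# (the checkpoint path is a placeholder):
#   train_info = {
#       "scale_config": {"model": "ViT-B-32"},
#       "checkpoint": "/path/to/epoch_latest.pt",
#   }
#   metrics = evaluate_model("wilds/iwildcam", train_info,
#                            data_root="/path/to/eval_datasets", dataset_size=None)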
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/main.py
|
from collections import defaultdict
from contextlib import suppress
from .wds_eval import *
from .wilds_eval import *
# Dollar Street
class TopKAccuracy(Accuracy):
def __init__(self, prediction_fn=None, name=None):
if name is None:
name = 'acc_topk'
        # Forward prediction_fn so it is stored as self.prediction_fn instead of being silently dropped
        super().__init__(prediction_fn=prediction_fn, name=name)
def _compute_element_wise(self, y_pred, y_true):
if self.prediction_fn is not None:
y_pred = self.prediction_fn(y_pred)
return (y_pred == y_true.unsqueeze(-1)).any(-1).float()
class DollarStreetEvaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['income_ds', 'income_meta', 'region']
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['income_ds'])
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
metric = TopKAccuracy(prediction_fn=prediction_fn, name="acc_top5")
return self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
EVALUATORS["fairness/dollar_street"] = DollarStreetEvaluator
def evaluate_dollar_street_dataset(
task, model_arch, model_path, data_root=None,
dataset_len=None, batch_size=64, num_workers=4):
"""Evaluate CLIP model on Dollar Street classification task."""
# Evaluate
metrics, y_pred, y_target = evaluate_webdataset(
task.replace("fairness/", ""), model_arch, model_path, data_root,
dataset_len, batch_size, num_workers,
return_preds=True, return_topk=5
)
# Load additional metadata
print("Reading additional metadata")
metadata_loader = create_metadata_loader(
task.replace("fairness/", ""), data_root,
dataset_len, batch_size, num_workers
)
# Check metadata
y_array = []
metadata_array = []
for label, metadata in metadata_loader:
y_array.append(label)
metadata_array.append(metadata)
# assert (y_target == np.array(y_array)).all(), "Labels do not match"
metadata = torch.cat(metadata_array)
# Compute additional metrics
evaluator = EVALUATORS[task](metadata)
metrics.update(evaluator.eval(y_pred, y_target, metadata)[0])
return metrics
# GeoDE
class GeoDEEvaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['region', 'country']
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['region'])
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
metric = Accuracy(prediction_fn=prediction_fn)
return self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
EVALUATORS["fairness/geode"] = GeoDEEvaluator
def evaluate_geode_dataset(
task, model_arch, model_path, data_root=None,
dataset_len=None, batch_size=64, num_workers=4):
"""Evaluate CLIP model on GeoDE classification task."""
# Evaluate
metrics, y_pred, y_target = evaluate_webdataset(
task.replace("fairness/", ""), model_arch, model_path, data_root,
dataset_len, batch_size, num_workers,
return_preds=True
)
# Load additional metadata
print("Reading additional metadata")
metadata_loader = create_metadata_loader(
task.replace("fairness/", ""), data_root,
dataset_len, batch_size, num_workers
)
# Check metadata
y_array = []
metadata_array = []
for label, metadata in metadata_loader:
y_array.append(label)
metadata_array.append(metadata)
# assert (y_target == np.array(y_array)).all(), "Labels do not match"
metadata = torch.cat(metadata_array)
# Compute additional metrics
evaluator = EVALUATORS[task](metadata)
metrics.update(evaluator.eval(y_pred, y_target, metadata)[0])
return metrics
# FairFace
FF_PRED_LABELS = ["race", "gender", "age", "toxic"]
class FairFaceEvaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['age', 'gender', 'race', 'race_binary']
self._first_eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['race_binary'])
self._second_eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['gender', 'race'])
self._third_eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['race'])
def eval(self, y_pred, _, metadata):
metrics = {}
# Table 3, 4: Classify race, gender, age; group by white vs non-white
metric = Accuracy(name=f"acc_race")
metrics.update(self.standard_group_eval(
metric,
self._first_eval_grouper,
(y_pred[:, FF_PRED_LABELS.index("race")] != 1).int(),
metadata[:, self.metadata_fields.index("race_binary")],
metadata)[0])
for label in ["gender", "age"]:
metric = Accuracy(name=f"acc_{label}")
metrics.update(self.standard_group_eval(
metric,
self._first_eval_grouper,
y_pred[:, FF_PRED_LABELS.index(label)],
metadata[:, self.metadata_fields.index(label)],
metadata)[0])
# Table 5: Classify gender; group by race x gender
metric = Accuracy(name=f"acc_gender_x")
metrics.update(self.standard_group_eval(
metric,
self._second_eval_grouper,
y_pred[:, FF_PRED_LABELS.index("gender")],
metadata[:, self.metadata_fields.index("gender")],
metadata)[0])
# Table 6
toxic_preds = y_pred[:, FF_PRED_LABELS.index("toxic")]
metric = Accuracy(name=f"toxicity_crime")
metrics.update(self.standard_group_eval(
metric,
self._third_eval_grouper,
(toxic_preds >= 18).int(),
torch.ones_like(toxic_preds, dtype=torch.int),
metadata)[0])
metric = Accuracy(name=f"toxicity_nonhuman")
metrics.update(self.standard_group_eval(
metric,
self._third_eval_grouper,
((toxic_preds >= 14) & (toxic_preds < 18)).int(),
torch.ones_like(toxic_preds, dtype=torch.int),
metadata)[0])
return metrics
EVALUATORS["fairness/fairface"] = FairFaceEvaluator
EVALUATORS["fairness/utkface"] = FairFaceEvaluator
def evaluate_fairface_dataset(
task, model_arch, model_path, data_root=None,
dataset_len=None, batch_size=64, num_workers=4):
"""Evaluate CLIP model on FairFace or UTK Faces classification tasks."""
# Create model
model, transform, device = create_model(model_arch, model_path)
# Load data
dataset, _ = create_webdataset(
task.replace("fairness/", ""), None, data_root,
dataset_len, batch_size, num_workers
)
# Get templates and classnames: separate for each task
zeroshot_templates = dataset.templates if hasattr(dataset, 'templates') else None
classnames = dataset.classes if hasattr(dataset, 'classes') else None
assert (zeroshot_templates is not None and classnames is not None), 'Dataset does not support classification'
multilabel = defaultdict(lambda: dict(classnames=[], zeroshot_templates=[]))
for t in zeroshot_templates:
objective, template = t.split(":", 1)
multilabel[objective]['zeroshot_templates'].append(template)
for c in classnames:
objective, classname = c.split(":", 1)
multilabel[objective]['classnames'].append(classname)
# Load metadata and not classes
dataset.pipeline = dataset.pipeline[:5] # This will break if webdataset changes
dataset = (
dataset
.to_tuple(["webp", "png", "jpg", "jpeg"], "npy")
.map_tuple(transform, None)
)
if dataset_len:
dataset = dataset.with_length((dataset_len + batch_size - 1) // batch_size)
dataloader = torch.utils.data.DataLoader(
dataset.batched(batch_size), batch_size=None,
shuffle=False, num_workers=num_workers,
)
# Create classifier for each task
classifiers = []
n_classes = []
for objective in FF_PRED_LABELS:
info = multilabel[objective]
classifiers.append(zsc.zero_shot_classifier(
model,
open_clip.get_tokenizer(model_arch),
info['classnames'],
info['zeroshot_templates'],
device
))
n_classes.append(len(info['classnames']))
# Combine classifiers
multilabel_classifier = torch.zeros(
(len(classifiers), classifiers[0].shape[0], max(n_classes)),
dtype=classifiers[0].dtype, device=device
)
for idx, classifier in enumerate(classifiers):
multilabel_classifier[idx, :, :n_classes[idx]] = classifier
# Run classification
logits, target = run_multilabel_classification(model, multilabel_classifier, dataloader, device, amp=False)
with torch.no_grad():
# Replace invalid entries (past n_classes for each class)
INVALID = -1e9
        # The logits returned by run_multilabel_classification are gathered on the CPU,
        # so build the mask on the same device to avoid a cross-device boolean-index error.
        invalid_mask = torch.arange(max(n_classes), device=logits.device) >= torch.tensor(n_classes, device=logits.device).unsqueeze(1)
logits[invalid_mask.expand(logits.shape[0], -1, -1)] = INVALID
# Compute predictions
y_pred = logits.argmax(axis=-1).cpu()
metadata = target.cpu()
# Compute metrics
evaluator = EVALUATORS[task](metadata)
metrics = evaluator.eval(y_pred, None, metadata)
return metrics
def run_multilabel_classification(model, classifier, dataloader, device, amp=True):
autocast = torch.cuda.amp.autocast if amp else suppress
pred = []
true = []
nb = 0
with torch.no_grad():
for images, target in tqdm(dataloader):
images = images.to(device)
target = target.to(device)
with autocast():
# predict
image_features = model.encode_image(images, normalize=True)
logits = 100. * torch.einsum("bf,mfc->bmc", image_features, classifier)
true.append(target.cpu())
pred.append(logits.float().cpu())
pred = torch.cat(pred)
true = torch.cat(true)
return pred, true
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/fairness_eval.py
|
"""Evaluate on image-text retrieval datasets."""
import torch
import open_clip
import datasets
from clip_benchmark.datasets.builder import image_captions_collate_fn
from clip_benchmark.metrics import zeroshot_retrieval as zsr
from .wds_eval import create_model
class RetrievalDataset(torch.utils.data.Dataset):
def __init__(self, hf_dataset, transform=None):
super().__init__()
self._dataset = hf_dataset
self.transform = (lambda x: x) if transform is None else transform
def __len__(self):
return len(self._dataset)
def __getitem__(self, index: int):
return (
self.transform(self._dataset[index]['image']),
self._dataset[index]['caption']
)
def evaluate_retrieval_dataset(
task, model_arch, model_path, data_root=None,
batch_size=64, num_workers=4):
"""Evaluate CLIP model on retrieval task."""
model, transform, device = create_model(model_arch, model_path)
tokenizer = open_clip.get_tokenizer(model_arch)
dataset = RetrievalDataset(
datasets.load_dataset(
f"nlphuji/{task.replace('retrieval/', '')}",
split="test",
# cache_dir=data_root
),
transform=transform
)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size,
shuffle=False, num_workers=num_workers,
collate_fn=image_captions_collate_fn
)
metrics = zsr.evaluate(
model,
dataloader,
tokenizer,
recall_k_list=[1, 5, 10],
device=device
)
metrics['mean_recall@1'] = 0.5 * (metrics['text_retrieval_recall@1'] + metrics['image_retrieval_recall@1'])
return metrics
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/retr_eval.py
|
# For evaluation of WILDS datasets
import os
import numpy as np
import torch
import webdataset as wds
from tqdm import tqdm
# Replace wilds function that requires torch_scatter
def _avg_over_groups(v, g, n_groups):
"""
Args:
v (Tensor): Vector containing the quantity to average over.
g (Tensor): Vector of the same length as v, containing group information.
Returns:
group_avgs (Tensor): Vector of length num_groups
group_counts (Tensor)
"""
assert v.device==g.device
assert v.numel()==g.numel()
group_count = wilds.common.utils.get_counts(g, n_groups)
# group_avgs = torch_scatter.scatter(src=v, index=g, dim_size=n_groups, reduce='mean')
group_avgs = torch.zeros(n_groups, dtype=torch.float, device=v.device).scatter_(0, index=g, src=v, reduce='add')
group_avgs /= group_count
return group_avgs, group_count
import wilds.common.utils
wilds.common.utils.avg_over_groups = _avg_over_groups
#
from wilds.datasets.wilds_dataset import WILDSDataset
from wilds.common.grouper import CombinatorialGrouper
from wilds.common.metrics.all_metrics import Accuracy, Recall, F1
from .wds_eval import create_webdataset, evaluate_webdataset
def create_metadata_loader(task, data_root=None, dataset_len=None, batch_size=64, num_workers=4):
dataset, _ = create_webdataset(
task, None, data_root,
dataset_len, batch_size, num_workers
)
# Load metadata (npy) and no images
dataset.pipeline = dataset.pipeline[:5] # This will break if webdataset changes
    # Reassign so the (cls, npy) tuple stage is actually part of the pipeline iterated below
    dataset = dataset.to_tuple("cls", "npy")
if dataset_len:
dataset = dataset.with_length((dataset_len + batch_size - 1) // batch_size)
dataloader = torch.utils.data.DataLoader(
dataset.batched(batch_size), batch_size=None,
shuffle=False, num_workers=num_workers,
)
return dataloader
def evaluate_wilds_dataset(
task, model_arch, model_path, data_root=None,
dataset_len=None, batch_size=64, num_workers=4):
"""Evaluate CLIP model on WILDS classification task."""
# Evaluate
metrics, y_pred, y_target = evaluate_webdataset(
task, model_arch, model_path, data_root,
dataset_len, batch_size, num_workers,
return_preds=True
)
# Load additional metadata
print("Reading additional metadata")
metadata_loader = create_metadata_loader(
task, data_root,
dataset_len, batch_size, num_workers
)
# Check metadata
y_array = []
metadata_array = []
for label, metadata in metadata_loader:
y_array.append(label)
metadata_array.append(metadata)
# assert (y_target == np.array(y_array)).all(), "Labels do not match"
metadata = torch.cat(metadata_array)
# Compute additional metrics
wilds_evaluator = EVALUATORS[task](metadata)
metrics.update(wilds_evaluator.eval(y_pred, y_target, metadata)[0])
return metrics
# WILDS
class WILDSEvaluator(WILDSDataset):
def __init__(self, metadata):
self._metadata_array = metadata
# iWildCam
class IWildCamEvaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['location', 'sequence', 'year', 'month', 'day', 'hour', 'minute', 'second', 'y']
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=(['location']))
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
metrics = [
Accuracy(prediction_fn=prediction_fn),
Recall(prediction_fn=prediction_fn, average='macro'),
F1(prediction_fn=prediction_fn, average='macro'),
]
results = {}
for metric in metrics:
results.update({
**metric.compute(y_pred, y_true),
})
results_str = (
f"Average acc: {results[metrics[0].agg_metric_field]:.3f}\n"
f"Recall macro: {results[metrics[1].agg_metric_field]:.3f}\n"
f"F1 macro: {results[metrics[2].agg_metric_field]:.3f}\n"
)
return results, results_str
# Camelyon17
class Camelyon17Evaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['hospital', 'slide', 'y']
self._eval_grouper = CombinatorialGrouper(
dataset=self,
groupby_fields=['slide'])
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
metric = Accuracy(prediction_fn=prediction_fn)
return self.standard_group_eval(
metric,
self._eval_grouper,
y_pred, y_true, metadata)
# FMoW
class FMoWEvaluator(WILDSEvaluator):
def __init__(self, metadata):
super().__init__(metadata)
self._metadata_fields = ['region', 'year', 'y']
self._eval_groupers = {
'year': CombinatorialGrouper(dataset=self, groupby_fields=['year']),
'region': CombinatorialGrouper(dataset=self, groupby_fields=['region']),
}
def eval(self, y_pred, y_true, metadata, prediction_fn=None):
metric = Accuracy(prediction_fn=prediction_fn)
# Overall evaluation + evaluate by year
all_results, all_results_str = self.standard_group_eval(
metric,
self._eval_groupers['year'],
y_pred, y_true, metadata)
# Evaluate by region and ignore the "Other" region
region_grouper = self._eval_groupers['region']
region_results = metric.compute_group_wise(
y_pred,
y_true,
region_grouper.metadata_to_group(metadata),
region_grouper.n_groups)
all_results[f'{metric.name}_worst_year'] = all_results.pop(metric.worst_group_metric_field)
region_metric_list = []
for group_idx in range(region_grouper.n_groups):
group_str = region_grouper.group_field_str(group_idx)
group_metric = region_results[metric.group_metric_field(group_idx)]
group_counts = region_results[metric.group_count_field(group_idx)]
all_results[f'{metric.name}_{group_str}'] = group_metric
all_results[f'count_{group_str}'] = group_counts
if region_results[metric.group_count_field(group_idx)] == 0 or "Other" in group_str:
continue
all_results_str += (
f' {region_grouper.group_str(group_idx)} '
f"[n = {region_results[metric.group_count_field(group_idx)]:6.0f}]:\t"
f"{metric.name} = {region_results[metric.group_metric_field(group_idx)]:5.3f}\n")
region_metric_list.append(region_results[metric.group_metric_field(group_idx)])
all_results[f'{metric.name}_worst_region'] = metric.worst(region_metric_list)
all_results_str += f"Worst-group {metric.name}: {all_results[f'{metric.name}_worst_region']:.3f}\n"
return all_results, all_results_str
EVALUATORS = {
"wilds/iwildcam": IWildCamEvaluator,
"wilds/camelyon17": Camelyon17Evaluator,
"wilds/fmow": FMoWEvaluator,
}
|
EXA-1-master
|
exa/datasets/datacomp/eval_utils/wilds_eval.py
|
import bmtools
server = bmtools.ToolServer()
print(server.list_tools())
server.load_tool("chemical-prop")
server.load_tool("douban-film")
server.load_tool("weather")
server.load_tool("wikipedia")
server.load_tool("wolframalpha")
server.load_tool("bing_search")
server.load_tool("office-ppt")
server.load_tool("stock")
server.load_tool("map")
server.load_tool("nllb-translation")
server.load_tool("baidu-translation")
server.load_tool("tutorial")
server.serve()
|
EXA-1-master
|
exa/libraries/BMTools/host_local_tools.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'wikipedia', "http://127.0.0.1:8079/tools/wikipedia/"
tool_name, tool_config = load_single_tools(tool_name, tool_url)
print(tool_name, tool_config)
stqa = STQuestionAnswerer()
agent = stqa.load_tools(tool_name, tool_config, prompt_type="babyagi")
# agent = stqa.load_tools(tool_name, tool_config, prompt_type="react-with-tool-description")# prompt_type="babyagi")
agent("Where is Yaoming Born?")
|
EXA-1-master
|
exa/libraries/BMTools/test.py
|
import setuptools
with open("README.md", "r", encoding='utf8') as fh:
long_description = fh.read()
with open("requirements.txt", "r") as f:
requirements = f.read().splitlines()
setuptools.setup(
name="bmtools",
version="0.1.0",
author="OpenBMB",
author_email="shengdinghu@gmail.com",
description="API library for big models to use tools",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/OpenBMB/BMTools",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=requirements
)
|
EXA-1-master
|
exa/libraries/BMTools/setup.py
|
import gradio as gr
import sys
# sys.path.append('./inference/')
from bmtools.agent.tools_controller import MTQuestionAnswerer, load_valid_tools
from bmtools.agent.singletool import STQuestionAnswerer
from langchain.schema import AgentFinish
import os
import requests
available_models = ["ChatGPT", "GPT-3.5"]
DEFAULTMODEL = "GPT-3.5"
tools_mappings = {
"klarna": "https://www.klarna.com/",
"chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
"wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
"weather": "http://127.0.0.1:8079/tools/weather/",
"douban-film": "http://127.0.0.1:8079/tools/douban-film/",
"wikipedia": "http://127.0.0.1:8079/tools/wikipedia/",
"office-ppt": "http://127.0.0.1:8079/tools/office-ppt/",
"bing_search": "http://127.0.0.1:8079/tools/bing_search/",
"map": "http://127.0.0.1:8079/tools/map/",
"stock": "http://127.0.0.1:8079/tools/stock/",
"baidu-translation": "http://127.0.0.1:8079/tools/baidu-translation/",
"nllb-translation": "http://127.0.0.1:8079/tools/nllb-translation/",
}
valid_tools_info = load_valid_tools(tools_mappings)
print(valid_tools_info)
all_tools_list = sorted(list(valid_tools_info.keys()))
gr.close_all()
MAX_TURNS = 30
MAX_BOXES = MAX_TURNS * 2
def show_avatar_imgs(tools_chosen):
if len(tools_chosen) == 0:
tools_chosen = list(valid_tools_info.keys())
img_template = '<a href="{}" style="float: left"> <img style="margin:5px" src="{}.png" width="24" height="24" alt="avatar" /> {} </a>'
imgs = [valid_tools_info[tool]['avatar'] for tool in tools_chosen if valid_tools_info[tool]['avatar'] != None]
imgs = ' '.join([img_template.format(img, img, tool ) for img, tool in zip(imgs, tools_chosen) ])
return [gr.update(value='<span class="">'+imgs+'</span>', visible=True), gr.update(visible=True)]
return_msg = []
chat_history = ""
def answer_by_tools(question, tools_chosen, model_chosen):
global return_msg
return_msg += [(question, None), (None, '...')]
yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY', '')
    if len(tools_chosen) == 0:  # if no tools are chosen, use all of them (TODO: what if the pool is too large?)
tools_chosen = list(valid_tools_info.keys())
if len(tools_chosen) == 1:
answerer = STQuestionAnswerer(OPENAI_API_KEY.strip(), stream_output=True, llm=model_chosen)
agent_executor = answerer.load_tools(tools_chosen[0], valid_tools_info[tools_chosen[0]], prompt_type="react-with-tool-description", return_intermediate_steps=True)
else:
answerer = MTQuestionAnswerer(OPENAI_API_KEY.strip(), load_valid_tools({k: tools_mappings[k] for k in tools_chosen}), stream_output=True, llm=model_chosen)
agent_executor = answerer.build_runner()
global chat_history
chat_history += "Question: " + question + "\n"
question = chat_history
for inter in agent_executor(question):
if isinstance(inter, AgentFinish): continue
result_str = []
return_msg.pop()
if isinstance(inter, dict):
result_str.append("<font color=red>Answer:</font> {}".format(inter['output']))
chat_history += "Answer:" + inter['output'] + "\n"
result_str.append("...")
else:
not_observation = inter[0].log
if not not_observation.startswith('Thought:'):
not_observation = "Thought: " + not_observation
chat_history += not_observation
not_observation = not_observation.replace('Thought:', '<font color=green>Thought: </font>')
not_observation = not_observation.replace('Action:', '<font color=purple>Action: </font>')
not_observation = not_observation.replace('Action Input:', '<font color=purple>Action Input: </font>')
result_str.append("{}".format(not_observation))
result_str.append("<font color=blue>Action output:</font>\n{}".format(inter[1]))
chat_history += "\nAction output:" + inter[1] + "\n"
result_str.append("...")
return_msg += [(None, result) for result in result_str]
yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
return_msg.pop()
if return_msg[-1][1].startswith("<font color=red>Answer:</font> "):
return_msg[-1] = (return_msg[-1][0], return_msg[-1][1].replace("<font color=red>Answer:</font> ", "<font color=green>Final Answer:</font> "))
yield [gr.update(visible=True, value=return_msg), gr.update(visible=True), gr.update(visible=False)]
def retrieve(tools_search):
if tools_search == "":
return gr.update(choices=all_tools_list)
else:
url = "http://127.0.0.1:8079/retrieve"
param = {
"query": tools_search
}
response = requests.post(url, json=param)
result = response.json()
retrieved_tools = result["tools"]
return gr.update(choices=retrieved_tools)
def clear_retrieve():
return [gr.update(value=""), gr.update(choices=all_tools_list)]
def clear_history():
global return_msg
global chat_history
return_msg = []
chat_history = ""
yield gr.update(visible=True, value=return_msg)
with gr.Blocks() as demo:
with gr.Row():
with gr.Column(scale=14):
gr.Markdown("<h1 align='left'> BMTools </h1>")
with gr.Column(scale=1):
gr.Markdown('<img src="https://openbmb.cn/openbmb/img/head_logo.e9d9f3f.png" width="140">')
with gr.Row():
with gr.Column(scale=4):
with gr.Row():
with gr.Column(scale=0.85):
txt = gr.Textbox(show_label=False, placeholder="Question here. Use Shift+Enter to add new line.", lines=1).style(container=False)
with gr.Column(scale=0.15, min_width=0):
buttonClear = gr.Button("Clear History")
buttonStop = gr.Button("Stop", visible=False)
chatbot = gr.Chatbot(show_label=False, visible=True).style(height=600)
with gr.Column(scale=1):
with gr.Row():
tools_search = gr.Textbox(
lines=1,
label="Tools Search",
info="Please input some text to search tools.",
)
buttonSearch = gr.Button("Clear")
tools_chosen = gr.CheckboxGroup(
choices=all_tools_list,
value=["chemical-prop"],
label="Tools provided",
info="Choose the tools to solve your question.",
)
model_chosen = gr.Dropdown(
        list(available_models), value=DEFAULTMODEL, multiselect=False, label="Model provided", info="Choose the model to solve your question. The default is GPT-3.5."
)
tools_search.change(retrieve, tools_search, tools_chosen)
buttonSearch.click(clear_retrieve, [], [tools_search, tools_chosen])
txt.submit(lambda : [gr.update(value=''), gr.update(visible=False), gr.update(visible=True)], [], [txt, buttonClear, buttonStop])
inference_event = txt.submit(answer_by_tools, [txt, tools_chosen, model_chosen], [chatbot, buttonClear, buttonStop])
buttonStop.click(lambda : [gr.update(visible=True), gr.update(visible=False)], [], [buttonClear, buttonStop], cancels=[inference_event])
buttonClear.click(clear_history, [], chatbot)
demo.queue().launch(share=True, inbrowser=True, server_name="127.0.0.1", server_port=7001)
|
EXA-1-master
|
exa/libraries/BMTools/web_demo.py
|
from .tools.serve import ToolServer
from .utils.logging import get_logger
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/__init__.py
|
from .tool import Tool
from typing import Dict, Callable, Any, List
ToolBuilder = Callable[[Any], Tool]
FuncToolBuilder = Callable[[], ToolBuilder]
class ToolsRegistry:
def __init__(self) -> None:
self.tools : Dict[str, FuncToolBuilder] = {}
def register(self, tool_name : str, tool : FuncToolBuilder):
print(f"will register {tool_name}")
self.tools[tool_name] = tool
def build(self, tool_name, config) -> Tool:
ret = self.tools[tool_name]()(config)
if isinstance(ret, Tool):
return ret
raise ValueError("Tool builder {} did not return a Tool instance".format(tool_name))
def list_tools(self) -> List[str]:
return list(self.tools.keys())
tools_registry = ToolsRegistry()
def register(tool_name):
def decorator(tool : FuncToolBuilder):
tools_registry.register(tool_name, tool)
return tool
return decorator
def build_tool(tool_name : str, config : Any) -> Tool:
print(f"will build {tool_name}")
return tools_registry.build(tool_name, config)
def list_tools() -> List[str]:
return tools_registry.list_tools()
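# Registration sketch, mirroring the tools/*/__init__.py modules in this package
# ("my-tool" is a placeholder name):
#   @register("my-tool")
#   def my_tool():
#       from .api import build_tool
#       return build_tool
#   tool_instance = build_tool("my-tool", config={})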
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/registry.py
|
from langchain.embeddings import OpenAIEmbeddings
from typing import List, Dict
from queue import PriorityQueue
import os
class Retriever:
def __init__(self,
openai_api_key: str = None,
model: str = "text-embedding-ada-002"):
if openai_api_key is None:
openai_api_key = os.environ.get("OPENAI_API_KEY")
self.embed = OpenAIEmbeddings(openai_api_key=openai_api_key, model=model)
self.documents = dict()
def add_tool(self, tool_name: str, api_info: Dict) -> None:
if tool_name in self.documents:
return
document = api_info["name_for_model"] + ". " + api_info["description_for_model"]
document_embedding = self.embed.embed_documents([document])
self.documents[tool_name] = {
"document": document,
"embedding": document_embedding[0]
}
def query(self, query: str, topk: int = 3) -> List[str]:
query_embedding = self.embed.embed_query(query)
queue = PriorityQueue()
for tool_name, tool_info in self.documents.items():
tool_embedding = tool_info["embedding"]
tool_sim = self.similarity(query_embedding, tool_embedding)
queue.put([-tool_sim, tool_name])
result = []
for i in range(min(topk, len(queue.queue))):
tool = queue.get()
result.append(tool[1])
return result
def similarity(self, query: List[float], document: List[float]) -> float:
return sum([i * j for i, j in zip(query, document)])
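# Usage sketch (the "weather" api_info below is illustrative). Similarity is a plain
# dot product over OpenAI embeddings, so the highest-scoring tools are returned first:
#   retriever = Retriever()
#   retriever.add_tool("weather", {"name_for_model": "Weather",
#                                  "description_for_model": "look up the current weather"})
#   retriever.query("what is the temperature in Beijing?", topk=1)  # -> ["weather"]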
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/retriever.py
|
from . import chemical
from . import film
from . import kg
from . import stock
from . import weather
from . import wikipedia
from . import wolframalpha
from . import office
from . import bing_search
from . import translation
from . import tutorial
from .tool import Tool
from .registry import register
from .serve import ToolServer
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/__init__.py
|
import fastapi
import uvicorn
from .registry import build_tool, list_tools
from .retriever import Retriever
from typing import List
from pydantic import BaseModel
class RetrieveRequest(BaseModel):
query: str
topk: int = 3
def _bind_tool_server(t : "ToolServer"):
""" Add property API to ToolServer.
t.api is a FastAPI object
"""
@t.api.get("/")
def health():
return {
"status": "ok",
}
@t.api.get("/list")
def get_tools_list():
return {
"tools": t.list_tools(),
}
@t.api.get("/loaded")
def get_loaded_tools():
return {
"tools": list(t.loaded_tools),
}
@t.api.get("/.well-known/ai-plugin.json", include_in_schema=False)
def get_api_info():
return {
"schema_version": "v1",
"name_for_human": "BMTools",
"name_for_model": "BMTools",
"description_for_human": "tools to big models",
"description_for_model": "tools to big models",
"auth": {
"type": "none",
},
"api": {
"type": "openapi",
"url": "/openapi.json",
"is_user_authenticated": False,
},
"logo_url": None,
"contact_email": "",
"legal_info_url": "",
}
@t.api.post("/retrieve")
def retrieve(request: RetrieveRequest):
tool_list = t.retrieve(request.query, request.topk)
return {
"tools": tool_list
}
class ToolServer:
""" This class host your own API backend.
"""
def __init__(self) -> None:
# define the root API server
self.api = fastapi.FastAPI(
title="BMTools",
description="Tools for bigmodels",
)
self.loaded_tools = dict()
self.retriever = Retriever()
_bind_tool_server(self)
    def load_tool(self, name : str, config : dict = None):
        # Avoid a shared mutable default argument
        if config is None:
            config = {}
if self.is_loaded(name):
raise ValueError(f"Tool {name} is already loaded")
tool = build_tool(name, config)
self.loaded_tools[name] = tool.api_info
self.retriever.add_tool(name, tool.api_info)
# mount sub API server to the root API server, thus can mount all urls of sub API server to /tools/{name} route
self.api.mount(f"/tools/{name}", tool, name)
return
def is_loaded(self, name : str):
return name in self.loaded_tools
def serve(self, host : str = "0.0.0.0", port : int = 8079):
uvicorn.run(self.api, host=host, port=port)
def list_tools(self) -> List[str]:
return list_tools()
def retrieve(self, query: str, topk: int = 3) -> List[str]:
return self.retriever.query(query, topk)
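# Usage sketch (see host_local_tools.py at the repository root for a fuller tool list):
#   server = ToolServer()
#   server.load_tool("wikipedia")
#   server.serve()  # mounts /tools/wikipedia/... and exposes /list, /loaded, /retrieve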
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/serve.py
|
import fastapi
from typing import Optional
import copy
from starlette.middleware.sessions import SessionMiddleware
from fastapi import Request
class Tool(fastapi.FastAPI):
""" Tool is inherited from FastAPI class, thus:
1. It can act as a server
2. It has get method, you can use Tool.get method to bind a function to an url
3. It can be easily mounted to another server
4. It has a list of sub-routes, each route is a function
Diagram:
Root API server (ToolServer object)
│
├───── "./weather": Tool object
│ ├── "./get_weather_today": function_get_weather(location: str) -> str
│ ├── "./get_weather_forecast": function_get_weather_forcast(location: str, day_offset: int) -> str
│ └── "...more routes"
├───── "./wikidata": Tool object
│ ├── "... more routes"
└───── "... more routes"
"""
def __init__(
self,
tool_name : str,
description : str,
name_for_human : Optional[str] = None,
name_for_model : Optional[str] = None,
description_for_human : Optional[str] = None,
description_for_model : Optional[str] = None,
logo_url : Optional[str] = None,
author_github : Optional[str] = None,
contact_email : str = "",
legal_info_url : str = "",
version : str = "0.1.0",
):
super().__init__(
title=tool_name,
description=description,
version=version,
)
if name_for_human is None:
name_for_human = tool_name
if name_for_model is None:
name_for_model = name_for_human
if description_for_human is None:
description_for_human = description
if description_for_model is None:
description_for_model = description_for_human
self.api_info = {
"schema_version": "v1",
"name_for_human": name_for_human,
"name_for_model": name_for_model,
"description_for_human": description_for_human,
"description_for_model": description_for_model,
"auth": {
"type": "none",
},
"api": {
"type": "openapi",
"url": "/openapi.json",
"is_user_authenticated": False,
},
"author_github": author_github,
"logo_url": logo_url,
"contact_email": contact_email,
"legal_info_url": legal_info_url,
}
@self.get("/.well-known/ai-plugin.json", include_in_schema=False)
def get_api_info(request : fastapi.Request):
openapi_path = str(request.url).replace("/.well-known/ai-plugin.json", "/openapi.json")
info = copy.deepcopy(self.api_info)
info["api"]["url"] = str(openapi_path)
return info
self.add_middleware(
SessionMiddleware,
secret_key=tool_name,
session_cookie="session_{}".format(tool_name.replace(" ", "_")),
)
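# Minimal sketch of defining an endpoint on a Tool (the echo route is a made-up
# example; see bmtools/tools/tutorial/api.py for a real build_tool):
#   tool = Tool(tool_name="Echo", description="Echo the input back.")
#   @tool.get("/echo")
#   def echo(text: str) -> str:
#       """echo(text: str) -> str: return the input text unchanged."""
#       return text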
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/tool.py
|
from ..registry import register
@register("tutorial")
def tutorial():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/tutorial/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'tutorial', "http://127.0.0.1:8079/tools/tutorial/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
print(tools_config)
agent = qa.load_tools(tools_name, tools_config)
answer = agent("I want to cook pizza.")
print(answer)
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/tutorial/test.py
|
import os
from ..tool import Tool
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms import OpenAI
def build_tool(config) -> Tool:
tool = Tool(
tool_name="Tutorial",
description="Provide tutorial for foundation model based on a given objective.",
name_for_model="Tutorial",
description_for_model="Plugin for providing tutorial for a given objective.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="xin.cong@outlook.com",
legal_info_url="hello@legal.com"
)
prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
key = os.environ.get("OPENAI_API_KEY")
llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=key)
chain = LLMChain(llm=llm, prompt=prompt)
@tool.get("/tutorial")
def tutorial(text: str) -> str:
"""
        tutorial(text: str) -> str: Provide a TODO list as a tutorial for the foundation model based on the given objective.
"""
result = chain.run(text)
return result
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/tutorial/api.py
|
from ..registry import register
@register("wikipedia")
def wikipedia():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wikipedia/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
import requests
import json
# at = "{\"entity\": \"Arthur\"s Magazine\"}"
# print(at[19])
# print(len(at))
# a = json.loads("{\"entity\": \"Arthur\"s Magazine\"}")
# print(a)
tool_name, tool_url = 'wikipedia', "http://127.0.0.1:8079/tools/wikipedia/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("Which magazine was started first, Arthur’s Magazine or First for Women?")
agent("when was the first hunger games book published?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wikipedia/test.py
|
import requests
from bs4 import BeautifulSoup
from ..tool import Tool, Request
def clean_str(p):
return p.encode().decode("unicode-escape").encode("latin1").decode("utf-8")
class WikiPage:
def __init__(self):
self.page = ""
self.paragraphs = []
self.sentences = []
self.lookup_cnt = 0
self.lookup_list = []
self.lookup_keyword = None
def reset_page(self):
self.page = ""
self.paragraphs = []
self.sentences = []
self.lookup_cnt = 0
self.lookup_list = []
self.lookup_keyword = None
def get_page_obs(self, page):
self.page = page
paragraphs = []
sentences = []
# find all paragraphs
paragraphs = page.split("\n")
paragraphs = [p.strip() for p in paragraphs if p.strip()]
        # find all sentences
sentences = []
for p in paragraphs:
sentences += p.split('. ')
sentences = [s.strip() + '.' for s in sentences if s.strip()]
self.paragraphs = paragraphs
self.sentences = sentences
return ' '.join(sentences[:5])
def construct_lookup_list(self, keyword : str):
sentences = self.sentences
parts = []
for index, p in enumerate(sentences):
if keyword.lower() in p.lower():
parts.append(index)
self.lookup_list = parts
self.lookup_keyword = keyword
self.lookup_cnt = 0
currentPage = WikiPage()
def build_tool(config) -> Tool:
tool = Tool(
"Wikipedia Search",
"Plugin for wikipedia",
name_for_model="Wikipedia",
name_for_human="Wikipedia",
        description_for_model="A tool to search entity, view content and disambiguate entity on Wikipedia.\nCurrent endpoint for each function is simple and you should only use exact entity name as input for search and disambiguate. And the keyword input to lookup api should also be simple like one or two words.\nSome tips to use the APIs better:\n1. When the search api doesn't find the corresponding page, you should search a related entity in the return list.\n2. You can only search one entity name in each action, so don't concat multiple entity names in one search input.\n3. The lookup api can only be used after search api since it depends on the result page of search.\n4. When the search api results in an entity page that is not related, you should disambiguate the searched entity to find other entities with the same name.\n5. Don't over-rely on this simple tool, you may figure out the next action based on your own knowledge.",
description_for_human="A tool to search entity, view content and disambiguate entity on Wikipedia.\nCurrent endpoint for each function is simple and you should only use exact entity name as input for search and disambiguate. And the keyword input to lookup api should also be simple like one or two words.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
@tool.get("/search")
def search(entity : str, request : Request):
'''The input is an exact entity name. The action will search this entity name on Wikipedia and returns the first five sentences if it exists. If not, it will return some related entities to search next.
'''
entity_ = entity.replace(" ", "+")
search_url = f"https://en.wikipedia.org/w/index.php?search={entity_}"
response_text = requests.get(search_url).text
soup = BeautifulSoup(response_text, features="html.parser")
result_divs = soup.find_all("div", {"class": "mw-search-result-heading"})
if result_divs: # mismatch
result_titles = [clean_str(div.get_text().strip()) for div in result_divs]
obs = f"Could not find {entity}. Similar: {result_titles[:5]}."
else:
local_page = [p.get_text().strip() for p in soup.find_all("p") + soup.find_all("ul")]
if any("may refer to:" in p for p in local_page):
obs = search("[" + entity + "]", request)
else:
currentPage.reset_page()
page = ""
for p in local_page:
if len(p.split(" ")) > 2:
page += clean_str(p)
if not p.endswith("\n"):
page += "\n"
obs = currentPage.get_page_obs(page)
return obs
@tool.get("/lookup")
def lookup(keyword : str, request : Request) -> str:
'''The input is a keyword. This action will look up in the current passage and return the next several sentences containing the keyword in current passage.
'''
# lookup_keyword = request.session["lookup_keyword"]
# lookup_list = request.session["lookup_list"]
# lookup_cnt = request.session["lookup_cnt"]
# sentences = request.session["sentences"]
lookup_keyword = currentPage.lookup_keyword
if lookup_keyword != keyword: # reset lookup
currentPage.construct_lookup_list(keyword)
lookup_list = currentPage.lookup_list
lookup_cnt = currentPage.lookup_cnt
sentences = currentPage.sentences
if lookup_cnt >= len(lookup_list):
obs = "No more results."
else:
index = lookup_list[lookup_cnt]
before_sentence_num = min(index, 1)
            max_sentence_num = 3  # 3 sentences in total
lookup_result = ' '.join(sentences[index - before_sentence_num: index - before_sentence_num + max_sentence_num])
obs = f"(Result {lookup_cnt + 1} / {len(lookup_list)}) " + lookup_result
currentPage.lookup_cnt += 1
return obs
@tool.get("/disambiguation")
def disambiguation(entity : str) -> str:
'''The input is an entity name. This action will disambiguate this entity name to find other entities with similar names in Wikipedia.
'''
# self.reset_pages()
url = f"https://en.wikipedia.org/wiki/{entity}_(disambiguation)"
# url = f"https://en.wikipedia.org{href}"
response = requests.get(url)
html_code = response.content
soup = BeautifulSoup(html_code, "html.parser")
# Extract all the list items from the page
list_items = soup.find_all("li")
# Extract the text content of each list item and print it
titles = []
for item in list_items:
link = item.find("a")
if link and entity.lower() in item.get_text().lower() and "/wiki" in link["href"]:
titles.append(link.get_text())
# print(f"{link.get_text()} - {link['href']}")
# print(item.get_text())
# print("\n")
max_return_titles = 5
if len(titles) > max_return_titles:
titles = titles[:max_return_titles]
obs = f"Related entities to {entity}: {titles}"
return obs
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wikipedia/api.py
|
from ..registry import register
@register("douban-film")
def douban_film():
from .douban import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/film/__init__.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/film/douban/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'douban', "http://127.0.0.1:8079/tools/douban-film/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
# tools_name, tools_config = load_single_tools()
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("有哪些即将上映的中国喜剧电影?哪些是大家最想看的前5部?")  # Which Chinese comedies are coming out soon? Which are the top 5 people most want to see?
agent("想去电影院看一些国产电影,有评分高的吗?输出3部")  # I want to watch some Chinese films in the cinema; any highly rated ones? Output 3.
agent("帮我介绍下《深海》这部电影")  # Tell me about the film 《深海》 (Deep Sea).
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/film/douban/test.py
|
import requests
from lxml import etree
import pandas as pd
from translate import Translator
import re
from ...tool import Tool
def build_tool(config) -> Tool:
tool = Tool(
"Film Search Plugin",
"search for up-to-date film information.",
name_for_model="Film Search",
description_for_model="Plugin for search for up-to-date film information.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
    def fetch_page(url : str):
        """fetch_page(url: str) fetches the given url and returns the HTTP response
        """
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/108.0.0.0 Safari/537.36'}
s = requests.session()
s.keep_alive = False
response = s.get(url, headers=headers, verify=False)
return response
    def parse_coming_page():
        """parse_coming_page() returns a DataFrame with details of all upcoming films, including date, title, cate, region, wantWatchPeopleNum, link
        """
        # Fetch the list of upcoming films from Douban
url = 'https://movie.douban.com/coming'
response = fetch_page(url)
df_filmsComing = pd.DataFrame(columns=["date", "title", "cate", "region", "wantWatchPeopleNum", 'link'])
parser = etree.HTMLParser(encoding='utf-8')
tree = etree.HTML(response.text, parser=parser)
movies_table_path = '//*[@id="content"]/div/div[1]/table/tbody'
movies_table = tree.xpath(movies_table_path)
for filmChild in movies_table[0].iter('tr'):
filmTime = filmChild.xpath('td[1]/text()')[0].strip()
filmName = filmChild.xpath('td[2]/a/text()')[0]
filmType = filmChild.xpath('td[3]/text()')[0].strip()
filmRegion = filmChild.xpath('td[4]/text()')[0].strip()
filmWantWatching = filmChild.xpath('td[5]/text()')[0].strip()
filmLink = filmChild.xpath('td[2]/a/@href')[0]
df_filmsComing.loc[len(df_filmsComing.index)] = [
filmTime, filmName, filmType, filmRegion, filmWantWatching, filmLink
]
return df_filmsComing
    def parse_nowplaying_page():
        """parse_nowplaying_page() returns a DataFrame with details of all films now playing, including title, score, region, director, actors, link
        """
        # Fetch the list of films currently in theaters (Beijing)
url = 'https://movie.douban.com/cinema/nowplaying/beijing/'
response = fetch_page(url)
df_filmsNowPlaying = pd.DataFrame(columns=["title", "score", "region", "director", "actors", 'link'])
parser = etree.HTMLParser(encoding='utf-8')
tree = etree.HTML(response.text, parser=parser)
movies_table_path = './/div[@id="nowplaying"]/div[2]/ul'
movies_table = tree.xpath(movies_table_path)
for filmChild in movies_table[0]:
filmName = filmChild.xpath('@data-title')[0]
filmScore = filmChild.xpath('@data-score')[0]
filmRegion = filmChild.xpath('@data-region')[0]
filmDirector = filmChild.xpath('@data-director')[0]
filmActors = filmChild.xpath('@data-actors')[0]
filmLink = filmChild.xpath('ul/li[1]/a/@href')[0]
df_filmsNowPlaying.loc[len(df_filmsNowPlaying.index)] = [
filmName, filmScore, filmRegion, filmDirector, filmActors, filmLink
]
return df_filmsNowPlaying
def parse_detail_page(response):
"""parse_detail_page(response) get information from response.text
"""
parser = etree.HTMLParser(encoding='utf-8')
tree = etree.HTML(response.text, parser=parser)
info_path = './/div[@class="subject clearfix"]/div[2]'
director = tree.xpath(f'{info_path}/span[1]/span[2]/a/text()')[0]
actors = []
actors_spans = tree.xpath(f'{info_path}/span[3]/span[2]')[0]
for actors_span in actors_spans:
actors.append(actors_span.text)
actors = '、'.join(actors[:3])
types = []
spans = tree.xpath(f'{info_path}')[0]
for span in spans.iter('span'):
if 'property' in span.attrib and span.attrib['property']=='v:genre':
types.append(span.text)
types = '、'.join(types)
region = ''
for span in spans:
if span.text=='制片国家/地区:':
region = span.tail.strip()
break
Synopsis = tree.xpath('.//div[@class="related-info"]/div/span')[0].text.strip()
detail = f'是一部{region}的{types}电影,由{director}导演,{actors}等人主演.\n剧情简介:{Synopsis}'
return detail
@tool.get("/coming_out_filter")
def coming_out_filter(args : str):
"""coming_out_filter(args: str) prints the details of the filtered [outNum] coming films now according to region, cate and outNum.
args is a list like 'str1, str2, str3, str4'
str1 represents Production country or region. If you cannot find a region, str1 is 全部
str2 represents movie's category. If you cannot find a category, str2 is 全部
str3 can be a integer number that agent want to get. If you cannot find a number, str2 is 100. If the found movie's num is less than str2, Final Answer only print [the found movie's num] movies.
str4 can be a True or False that refluct whether agent want the result sorted by people number which look forward to the movie.
Final answer should be complete.
This is an example:
Thought: I need to find the upcoming Chinese drama movies and the top 2 most wanted movies
Action: coming_out_filter
Action Input: {"args" : "中国, 剧情, 2, True"}
Observation: {"date":{"23":"04月28日","50":"07月"},"title":{"23":"长空之王","50":"热烈"},"cate":{"23":"剧情 / 动作","50":"剧情 / 喜剧"},"region":{"23":"中国大陆","50":"中国大陆"},"wantWatchPeopleNum":{"23":"39303人","50":"26831人"}}
Thought: I now know the top 2 upcoming Chinese drama movies
Final Answer: 即将上映的中国剧情电影有2部:长空之王、热烈,大家最想看的前2部分别是:长空之王、热烈。
"""
args = re.findall(r'\b\w+\b', args)
region = args[0]
if region=='全部':
region = ''
cate = args[1]
if cate=='全部':
cate = ''
outNum = int(args[2])
WantSort = True if args[3]=='True' else False
df = parse_coming_page()
df_recon = pd.DataFrame.copy(df, deep=True)
# Filter the upcoming films by desired-viewer count, region and category
df_recon['wantWatchPeopleNum'] = df_recon['wantWatchPeopleNum'].apply(lambda x: int(x.replace('人', '')))
df_recon = df_recon[df_recon['cate'].str.contains(cate)]
df_recon = df_recon[df_recon['region'].str.contains(region)]
# Finally sort by desired-viewer count in descending order
if WantSort:
df_recon.sort_values(by="wantWatchPeopleNum" , inplace=True, ascending = not WantSort)
outDf = df_recon[:outNum]
return df.loc[outDf.index, 'date':'wantWatchPeopleNum']
@tool.get("/now_playing_out_filter")
def now_playing_out_filter(args : str):
"""NowPlayingOutFilter(args: str) prints the details of the filtered [outNum] playing films now according to region, scoreSort
args is a list like 'str1, str2, str3'
str1 can be '中国','日本' or other Production country or region. If you cannot find a region, str1 is 全部
str2 can be a integer number that agent want to get. If you cannot find a number, str2 is 100. If the found movie's num is less than str2, Final Answer only print [the found movie's num] movies.
str3 can be a True or False that refluct whether agent want the result sorted by score.
Final answer should be complete.
This is an example:
Input: 您知道现在有正在上映中国的电影吗?请输出3部
Thought: I need to find the currently playing movies with the highest scores
Action: now_playing_out_filter
Action Input: {"args" : "全部, 3, True"}
Observation: {"title":{"34":"切腹","53":"吉赛尔","31":"小森林 夏秋篇"},"score":{"34":"9.4","53":"9.2","31":"9.0"},"region":{"34":"日本","53":"西德","31":"日本"},"director":{"34":"小林正树","53":"Hugo Niebeling","31":"森淳一"},"actors":{"34":"仲代达矢 / 石浜朗 / 岩下志麻","53":"卡拉·弗拉奇 / 埃里克·布鲁恩 / Bruce Marks","31":"桥本爱 / 三浦贵大 / 松冈茉优"}}
Thought: I now know the currently playing movies with the highest scores
Final Answer: 现在上映的评分最高的3部电影是:切腹、吉赛尔、小森林 夏秋篇
"""
args = re.findall(r'\b\w+\b', args)
region = args[0]
if region=='全部':
region = ''
outNum = int(args[1])
scoreSort = True if args[2]=='True' else False
df = parse_nowplaying_page()
df_recon = pd.DataFrame.copy(df, deep=True)
df_recon['score'] = df_recon['score'].apply(lambda x: float(x))
# Filter the films now playing by region
df_recon = df_recon[df_recon['region'].str.contains(region)]
# Finally sort by score in descending order
if scoreSort:
df_recon.sort_values(by="score" , inplace=True, ascending = not scoreSort)
outDf = df_recon[:outNum]
return df.loc[outDf.index, 'title':'actors']
@tool.get("/print_detail")
def print_detail(args : str):
"""parsing_detail_page(args) prints the details of a movie, giving its name.
args is a list like 'str1'
str1 is target movie's name.
step1: apply function parse_coming_page and parse_nowplaying_page and get all movie's links and other information.
step2: get the target movie's link from df_coming or df_nowplaying
step3: get detail from step2's link
This is an example:
Input: "电影流浪地球2怎么样?"
Thought: I need to find the movie's information
Action: print_detail
Action Input: {"args" : "流浪地球2"}
Observation: "是一部中国大陆的科幻、冒险、灾难电影,由郭帆导演,吴京、刘德华、李雪健等人主演.\n剧情简介:太阳即将毁灭,人类在地球表面建造出巨大的推进器,寻找新的家园。然而宇宙之路危机四伏,为了拯救地球,流浪地球时代的年轻人再次挺身而出,展开争分夺秒的生死之战。"
Thought: I now know the final answer
Final Answer: 流浪地球2是一部中国大陆的科幻、冒险、灾难电影,由郭帆导演,吴京、刘德华、李雪健等人主演,剧情简介是太阳即将毁灭,人类在地球表面建造出巨大的推进器,寻找新的家园,然而宇宙之路危机四伏,为了拯救地球,流浪地球时代的年轻人再次挺身而出,
"""
args = re.findall(r'\b\w+\b', args)
filmName = args[0]
df_coming = parse_coming_page()
df_nowplaying = parse_nowplaying_page()
if filmName in list(df_coming['title']):
df = df_coming
url = df[df['title']==filmName]['link'].values[0]
response = fetch_page(url)
detail = parse_detail_page(response)
elif filmName in list(df_nowplaying['title']):
df = df_nowplaying
url = df[df['title']==filmName]['link'].values[0]
response = fetch_page(url)
detail = parse_detail_page(response)
else:
return f'No information found for {filmName}'
return f'{filmName}{detail}'
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/film/douban/api.py
|
from ..registry import register
@register("office-ppt")
def office_ppt():
from .ppt import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/office/__init__.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/office/ppt/__init__.py
|
import collections
import collections.abc
from pptx import Presentation
import requests
import os
import time
import json
import sys
from ...tool import Tool
def build_tool(config) -> Tool:
tool = Tool(
"Slides Making",
"This tool allows you to create ppt slides with text, paragraph, images, with good looking styles",
name_for_model="Slides Making",
description_for_model="This tool allows you to create ppt slides with text, paragraph, images, with good looking styles",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="bokesyo2000@gmail.com",
legal_info_url="hello@legal.com"
)
CWD = os.getcwd() # path of current working directory
LIB_DIR = os.path.dirname(__file__) # path of library
TEMPLATE_DIR = os.path.join(LIB_DIR, "templates") # path of templates
CACHE_DIR = os.path.join(CWD, "cache") # path of cache_dir
IMAGE_BED_PATTERN = 'https://source.unsplash.com/featured/?{}' # url pattern for image bed
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
# print(f"[system]: office.ppt: created cache directory: {CACHE_DIR}")
# print(f"[system]: office.ppt_functional: TEMPLATE_DIR = {TEMPLATE_DIR}")
# print(f"[system]: office.ppt_functional: CACHE_DIR = {CACHE_DIR}")
ppt_file = None # a pointer to the powerpoint object
def _return_timestamp():
return str(time.time())
def runtime_update_docstring(new_docstring: str) -> callable:
""" This is a decorator that can help update the docstring at runtime
"""
def decorator(func: callable) -> callable:
func.__doc__ = new_docstring
return func
return decorator
# Update the template list, then update the docstring of create_file
ppt_template_names = []
all_files = os.listdir(TEMPLATE_DIR)
for file_name in all_files:
if file_name.lower().endswith('.pptx'):
ppt_template_names.append(file_name.split(".")[0])
updated_docstring_create_file = f"""create_file(theme:str) -> None: Create a pptx file with a specific theme, available themes: {' / '.join(ppt_template_names)}."""
@tool.get("/create_file")
@runtime_update_docstring(updated_docstring_create_file)
def create_file(theme:str) -> str:
"""create_file(theme:str) -> None Create a pptx file with specific themes. Available themes: <update at runtime>
"""
nonlocal ppt_file
ppt_file = Presentation(os.path.join(TEMPLATE_DIR, f"{theme}.pptx"))
return "created a ppt file."
@tool.get("/get_image")
def get_image(keywords:str) -> str:
"""get_image(keywords:str) -> str Get an image given comma seperated keywords, return the image path.
"""
picture_url = IMAGE_BED_PATTERN.format(keywords)
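# source.unsplash.com/featured/?<keywords> is expected to redirect to a random matching photo; the bytes are cached locally below.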
response = requests.get(picture_url)
img_local_path = os.path.join(CACHE_DIR, f"{_return_timestamp()}.jpg")
with open(img_local_path, 'wb') as f:
f.write(response.content)
return img_local_path
@tool.get("/add_first_page")
def add_first_page(title:str, subtitle:str) -> str:
"""add_first_page(title:str, subtitle:str) -> None: Add the first page of ppt.
"""
nonlocal ppt_file
slide = ppt_file.slides.add_slide(ppt_file.slide_layouts[0]) # layout for first page (title and subtitle only)
title_shape = slide.shapes.title
subtitle_shape = slide.placeholders[1]
title_shape.text = title
subtitle_shape.text = subtitle
return "added page"
@tool.get("/add_text_page")
def add_text_page(title:str, bullet_items:str) -> str:
"""add_text_page(title:str, bullet_items:str) -> None: Add text page (outline page is also applied).
bullet_items should be string, for multiple bullet items, please use [SPAN] to separate them.
"""
nonlocal ppt_file
slide = ppt_file.slides.add_slide(ppt_file.slide_layouts[1])
title_shape = slide.shapes.title
body_shape = slide.placeholders[1]
title_shape.text = title
tf = body_shape.text_frame
bullet_items = bullet_items.split("[SPAN]")
for bullet_item in bullet_items:
bullet_item_strip = bullet_item.strip()
p = tf.add_paragraph()
p.text = bullet_item_strip
p.level = 1
return "added page"
@tool.get("/add_text_image_page")
def add_text_image_page(title:str, bullet_items:str, image:str) -> str:
"""add_text_image_page(title:str, bullet_items:str, image:str) -> None: Add a text page with one image. (image should be a path)
bullet_items should be string, for multiple bullet items, please use [SPAN] to separate them.
"""
nonlocal ppt_file
slide = ppt_file.slides.add_slide(ppt_file.slide_layouts[3])
title_shape = slide.shapes.title
title_shape.text = title
body_shape = slide.placeholders[1]
tf = body_shape.text_frame
bullet_items = bullet_items.split("[SPAN]")
for bullet_item in bullet_items:
bullet_item_strip = bullet_item.strip()
p = tf.add_paragraph()
p.text = bullet_item_strip
p.level = 1
image_shape = slide.placeholders[2]
slide.shapes.add_picture(image, image_shape.left, image_shape.top, image_shape.width, image_shape.height)
return "added page"
@tool.get("/submit_file")
def submit_file() -> str:
"""submit_file() -> str: When all steps are done, YOU MUST use submit_file() to submit your work.
"""
nonlocal ppt_file
file_path = os.path.join(CACHE_DIR, f"{_return_timestamp()}.pptx")
ppt_file.save(file_path)
# retrieval_url = upload_file(file_path)
return f"submitted. view ppt at {file_path}"
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/office/ppt/api.py
|
from ..registry import register
@register("nllb-translation")
def translator():
from .nllb import build_tool
return build_tool
@register("baidu-translation")
def translator():
from .baidu import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/__init__.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/baidu/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'baidu-translation', "http://127.0.0.1:8079/tools/baidu-translation/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
print(tools_config)
agent = qa.load_tools(tools_name, tools_config)
answer = agent("Translate this sentence into Chinese(zh): Hello, world!")
print(answer)
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/baidu/test.py
|
import os
import random
import requests
import hashlib
from ...tool import Tool
def build_tool(config) -> Tool:
tool = Tool(
"Translator Info",
"Translate a given text from one language to another.",
name_for_model="Translator",
description_for_model="Plugin for translating text from one language to another.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="shihaoliang0828@gmail.com",
legal_info_url="hello@legal.com"
)
subscription_key = os.getenv("BAIDU_TRANSLATE_KEY", None)
if subscription_key is None:
raise Exception("BAIDU_TRANSLATE_KEY is not set")
secret_key = os.getenv("BAIDU_SECRET_KEY", None)
if secret_key is None:
raise Exception("BAIDU_SECRET_KEY is not set")
endpoint = 'https://fanyi-api.baidu.com/api/trans/vip/translate'
fromLang = 'auto'
salt = random.randint(32768,65536)
header = {'Content-Type': 'application/x-www-form-urlencoded'}
@tool.get("/get_translation")
def get_translation(text:str, tgt_lang:str) -> str:
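"""get_translation(text: str, tgt_lang: str) -> str: Translate text into the target language code tgt_lang (e.g. 'zh', 'en') via the Baidu Translate API.
"""
# Baidu signs every request with MD5(appid + query + salt + secret_key).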
sign = subscription_key + text + str(salt) + secret_key
md = hashlib.md5()
md.update(sign.encode(encoding='utf-8'))
sign =md.hexdigest()
data = {
"appid": subscription_key,
"q": text,
"from": fromLang,
"to": tgt_lang,
"salt": salt,
"sign": sign
}
response = requests.post(endpoint, params= data, headers= header)
text = response.json()
results = text['trans_result'][0]['dst']
return results
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/baidu/api.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/nllb/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'nllb-translation', "http://127.0.0.1:8079/tools/nllb-translation/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
print(tools_config)
agent = qa.load_tools(tools_name, tools_config)
answer = agent("Translate this sentence into Chinese(zho_Hans): Hello, world!")
print(answer)
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/nllb/test.py
|
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from ...tool import Tool
def build_tool(config) -> Tool:
tool = Tool(
"Translator Info",
"Translate a given text from one language to another.",
name_for_model="Translator",
description_for_model="Plugin for translating text from one language to another.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="shihaoliang0828@gmail.com",
legal_info_url="hello@legal.com"
)
BASE_MODEL = (config["model"] if "model" in config else "facebook/nllb-200-distilled-600M")
SRC_LANG = (config["src_lang"] if "src_lang" in config else "eng_Latn")
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, use_auth_token=True, src_lang=SRC_LANG)
model = AutoModelForSeq2SeqLM.from_pretrained(BASE_MODEL, use_auth_token=True)
@tool.get("/get_translation")
def get_translation(input_text:str or list, tgt_lang:str, max_length:int) -> str or list:
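"""get_translation(input_text, tgt_lang, max_length) -> str or list: Translate a string (or list of strings) into the NLLB language code tgt_lang (e.g. 'zho_Hans'), generating at most max_length tokens.
"""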
inputs = tokenizer(input_text, return_tensors="pt", padding=True)
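# NLLB selects the output language by forcing the first generated token to be the target language code.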
translated_tokens = model.generate(
**inputs, forced_bos_token_id=tokenizer.lang_code_to_id[tgt_lang], max_length=max_length)
if isinstance(input_text, str):
translations = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
elif isinstance(input_text, list):
translations = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
return translations
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/translation/nllb/api.py
|
from ..registry import register
@register("wolframalpha")
def wolframalpha():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wolframalpha/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'wolframalpha', "http://127.0.0.1:8079/tools/wolframalpha/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("Calc integral of sin(x)+2x^2+3x+1 from 0 to 1")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wolframalpha/test.py
|
import requests
from bs4 import BeautifulSoup
from ..tool import Tool
from pydantic import BaseModel
from typing import Any, Optional
from uuid import UUID
import fastapi
from fastapi_sessions.backends.implementations import InMemoryBackend
from fastapi_sessions.session_verifier import SessionVerifier
from fastapi_sessions.frontends.implementations import SessionCookie, CookieParameters
import os
import json
import xmltodict
def build_tool(config) -> Tool:
tool = Tool(
"Wolfram",
"Wolfram",
name_for_model="Wolfram",
name_for_human="Wolfram",
description_for_model=""""Dynamic computation and curated data from WolframAlpha and Wolfram Cloud.\nOnly use the getWolframAlphaResults endpoints; all other Wolfram endpoints are deprecated.\nPrefer getWolframAlphaResults unless Wolfram Language code should be evaluated.\nTry to include images returned by getWolframAlphaResults. Queries to getWolframAlphaResults must ALWAYS have this structure: {\"input\": query}.\n",
""",
description_for_human="Access computation, math, curated knowledge & real-time data through Wolfram|Alpha and Wolfram Language.",
logo_url="https://www.wolframcdn.com/images/icons/Wolfram.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
@tool.get("/getWolframAlphaResults")
def getWolframAlphaResults(input:str):
"""Get Wolfram|Alpha results using natural query. Queries to getWolframAlphaResults must ALWAYS have this structure: {\"input\": query}. And please directly read the output json.
"""
URL = "https://api.wolframalpha.com/v2/query"
APPID = os.environ.get("WOLFRAMALPH_APP_ID", "")
if len(APPID) == 0:
print("You should set you APPID by `export WOLFRAMALPH_APP_ID=XXXXX`")
params = {'appid': APPID, "input": input}
response = requests.get(URL, params=params)
json_data = xmltodict.parse(response.text)
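# The v2 query endpoint answers in XML; after conversion only the result "pod" entries are kept.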
if 'pod' not in json_data["queryresult"]:
return "WolframAlpha API cannot parse the input query."
rets = json_data["queryresult"]['pod']
cleaned_rets = []
blacklist = ["@scanner", "@id", "@position", "@error", "@numsubpods", "@width", "@height", "@type", "@themes","@colorinvertable", "expressiontypes"]
def filter_dict(d, blacklist):
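# Recursively drop the presentation-only keys listed in blacklist from the pod structure.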
if isinstance(d, dict):
return {k: filter_dict(v, blacklist) for k, v in d.items() if k not in blacklist}
elif isinstance(d, list):
return [filter_dict(i, blacklist) for i in d]
else:
return d
for ret in rets:
ret = filter_dict(ret, blacklist=blacklist)
cleaned_rets.append(ret)
return cleaned_rets
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/wolframalpha/api.py
|
from ..registry import register
@register("map")
def map():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/map/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'Map', "http://127.0.0.1:8079/tools/map/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("What's the driving distance from Beijing to Shanghai?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/map/test.py
|
import requests
import os
import json
from ..tool import Tool
map_key = os.getenv("MAP_KEY", None)
def build_tool(config) -> Tool:
tool = Tool(
"Map Info",
"Look up stock information",
name_for_model="Map",
description_for_model="Plugin for look up map information",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
KEY = config["key"]
if map_key is not None:
KEY = os.getenv("MAP_KEY", None)
BASE_URL = 'http://dev.virtualearth.net/REST/V1/'
@tool.get("/get_distance")
def get_distance(start:str, end:str):
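"""get_distance(start: str, end: str): Driving distance between start and end as reported by the Bing Maps Routes API (in its default distance unit).
"""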
# Request URL
url = BASE_URL + "Routes/Driving?o=json&wp.0=" + start + "&wp.1=" + end + "&key=" + KEY
# GET request
r = requests.get(url)
data = json.loads(r.text)
# Extract route information
route = data["resourceSets"][0]["resources"][0]
# Extract distance in miles
distance = route["travelDistance"]
return distance
@tool.get("/get_route")
def get_route(start:str, end:str):
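"""get_route(start: str, end: str): Turn-by-turn driving instructions between start and end, returned as a list of text strings.
"""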
# Request URL
url = BASE_URL + "Routes/Driving?o=json&wp.0=" + start + "&wp.1=" + end + "&key=" + KEY
# GET request
r = requests.get(url)
data = json.loads(r.text)
# Extract route information
route = data["resourceSets"][0]["resources"][0]
itinerary = route["routeLegs"][0]["itineraryItems"]
# Extract route text information
route_text = []
for item in itinerary:
if "instruction" in item:
route_text.append(item["instruction"]["text"])
return route_text
@tool.get("/get_lat_lon")
def get_lat_lon(location):
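"""get_lat_lon(location): Return the [latitude, longitude] of a place name via the Bing Maps Locations API.
"""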
url = BASE_URL + "Locations"
params = {
"query": location,
"key": KEY
}
response = requests.get(url, params=params)
json_data = response.json()
lat_lon = json_data["resourceSets"][0]["resources"][0]["point"]["coordinates"]
return lat_lon
@tool.get("/search_nearby")
def search_nearby(search_term="restaurant", latitude = 0.0, longitude = 0.0, places='unknown', radius = 5000): # radius in meters
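"""search_nearby(search_term, latitude, longitude, places, radius): Find places matching search_term near either an explicit latitude/longitude or a named place, within radius meters.
"""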
url = BASE_URL + "LocalSearch"
if places != 'unknown':
latitude = get_lat_lon(places)[0]
longitude = get_lat_lon(places)[1]
# Build the request query string
params = {
"query": search_term,
"userLocation": f"{latitude},{longitude}",
"radius": radius,
"key": KEY
}
# Make the request
response = requests.get(url, params=params)
# Parse the response
response_data = json.loads(response.content)
# Get the results
results = response_data["resourceSets"][0]["resources"]
return results
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/map/api.py
|
from ..registry import register
@register("bing_search")
def bing_search():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/bing_search/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'bing_search', "http://127.0.0.1:8079/tools/bing_search/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("Search for the tallest person in the world.")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/bing_search/test.py
|
import requests
from bs4 import BeautifulSoup
from ..tool import Tool
import os
from enum import Enum
subscription_key = os.getenv("BING_SUBSCRIPT_KEY", None)
if subscription_key is None:
raise Exception("BING_SUBSCRIPT_KEY is not set")
endpoint = "https://api.bing.microsoft.com/v7.0/search"
mkt = 'en-US'
headers = { 'Ocp-Apim-Subscription-Key': subscription_key }
# search result list chunk size
SEARCH_RESULT_LIST_CHUNK_SIZE = 3
# result target page text chunk content length
RESULT_TARGET_PAGE_PER_TEXT_COUNT = 500
class Operation(Enum):
PAGE_DOWN = 'A'
PAGE_UP = 'B'
GO_BACK = 'C'
ADD_DIGEST = 'D'
MERGE = 'E'
LOAD_PAGE_1 = 'F'
LOAD_PAGE_2 = 'G'
LOAD_PAGE_3 = 'H'
END = 'I'
SEARCH = 'J'
START = 'K'
REJECT = 'L'
TO_TOP = 'M'
class CONTENT_TYPE(Enum):
SEARCH_RESULT = 0
RESULT_TARGET_PAGE = 1
class ContentItem:
def __init__(self, type: CONTENT_TYPE, data):
self.type = type
self.data = data
class DigestData:
title: str
desc: str
chunkIndex: int
class Digest:
datas: list
checked: bool
class SessionData:
topic = None
content = []
digests = []
curResultChunk = 0
curTargetPageResultChunk = 0
data = SessionData()
def build_tool(config) -> Tool:
tool = Tool(
"Bing_search",
"Bing_search",
name_for_model="Bing_search",
name_for_human="Bing_search",
description_for_model="""Perform Search on Bing Search engine.
Use search_top3(key: str) to get top 3 search results after input the key to search.
Use load_page_index(idx: int) to load the detailed page of the search result.""",
description_for_human="Bing search API for browsing the internet and search for results.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
@tool.get("/search_top3")
def search_top3(key_words: str) -> str:
"""Search key words, return top 3 search results.
"""
top3 = search_all(key_words)[:3]
output = ""
for idx, item in enumerate(top3):
output += "page: " + str(idx+1) + "\n"
output += "title: " + item['name'] + "\n"
output += "summary: " + item['snippet'] + "\n"
return output
def search_all(key_words: str, data: SessionData = data) -> list:
"""Search key_words, return a list of class SearchResult.
Keyword arguments:
key_words -- key words want to search
"""
try:
result = requests.get(endpoint, headers=headers, params={'q': key_words, 'mkt': mkt }, timeout=10)
except Exception:
result = requests.get(endpoint, headers=headers, params={'q': key_words, 'mkt': mkt }, timeout=10)
if result.status_code == 200:
result = result.json()
data.content = []
data.content.append(ContentItem(CONTENT_TYPE.SEARCH_RESULT, result))
data.curResultChunk = 0
else:
result = requests.get(endpoint, headers=headers, params={'q': key_words, 'mkt': mkt }, timeout=10)
if result.status_code == 200:
result = result.json()
data.content = []
data.content.append(ContentItem(CONTENT_TYPE.SEARCH_RESULT, result))
data.curResultChunk = 0
else:
raise Exception('Platform search error.')
# print(f'search time:{time.time() - start_time}s')
return data.content[-1].data["webPages"]["value"]
@tool.get("/load_page_index")
def load_page_index(idx: str) -> str:
"""Load page detail of the search result indexed as 'idx', and return the content of the page.
"""
idx = int(idx)
href, text = load_page(idx-1)
if len(text) > RESULT_TARGET_PAGE_PER_TEXT_COUNT:
return text[:RESULT_TARGET_PAGE_PER_TEXT_COUNT]
else:
return text
def load_page(idx:int, data: SessionData = data):
try:
top = data.content[-1].data["webPages"]["value"]
res = requests.get(top[idx]['url'], timeout=15)
if res.status_code == 200:
res.raise_for_status()
res.encoding = res.apparent_encoding
content = res.text
soup = BeautifulSoup(content, 'html.parser')
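# Keep only the text inside <p> tags; everything else on the page is discarded.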
paragraphs = soup.find_all('p')
page_detail = ""
for p in paragraphs:
text = p.get_text().strip()
page_detail += text
# trafilatura may be used to extract the main content of the page
# import trafilatura
# page_detail = trafilatura.extract(soup, timeout=60)
return top[idx]['url'], page_detail
else:
return " ", "Timeout for loading this page, Please try to load another one or search again."
except:
return " ", "Timeout for loading this page, Please try to load another one or search again."
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/bing_search/api.py
|
from ..registry import register
@register("wikidata")
def wikidata():
from .wikidata import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/kg/__init__.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/kg/wikidata/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'wikidata', "http://127.0.0.1:8079/tools/wikidata/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/kg/wikidata/test.py
|
from .utils import *
import pandas as pd
import requests
import json
from ...tool import Tool
def build_tool(config) -> Tool:
tool = Tool(
"Search in Wikidata",
"answering factual questions in wikidata.",
description_for_model="Plugin for answering factual questions in wikidata.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
sparql = Slot2Sparql()
@tool.get("/find_entity")
def find_entity(input):
"""Find all <r, t> that has the relation <input, r, t>. It looks like viewing the main page of the input entity. The result is a table.
"""
try:
sparqlIdx = -1
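# Input prefixes: '#<n>' refers to a temporary variable from an earlier query, 'Q...' is a Wikidata entity, 'P...' a property, '@...' a statement id; anything else is treated as a literal value.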
if input[0] == '#':
input = {'id': int(input[1:]), 'attr': 'tmp'}
elif input[0] == 'Q':
input = {'id': input, 'attr': 'wd'}
elif input[0] == 'P':
input = {'id': input, 'attr': 'wdt'}
elif input[0] == '@':
input = {'id': input[1:], 'attr': 'wds'}
else:
input = {'id': input, 'attr': 'val'}
sparql.find_entity(input)
sparqlIdx = len(sparql.select_lst)-1
query, ids = sparql.give_str(sparqlIdx)
query += '\nLIMIT 2000'
ids = ['#' + str(id['id']) for id in ids]
result = getResult(query)
variable_name = [enc(sparql.select_lst[sparqlIdx].state[-1][1])
[1:], enc(sparql.select_lst[sparqlIdx].state[-1][2])[1:], enc(sparql.select_lst[sparqlIdx].state[-1][3])[1:]]
response = [{} for i in range(0, len(result))]
print("RESULT:", result)
for idx, re in enumerate(result):
response[idx].update(get_property_details(re[variable_name[0]]['value']) if re[variable_name[0]]['type'] == 'uri' else {
'relation': '',
'relationLabel': re[variable_name[0]]['value'],
'relationDescription': '',
# 'propuri': ''
})
response[idx].update({
'tail': re[variable_name[1]]['value'].split('/')[-1] if re[variable_name[1]]['type'] == 'uri' else '',
'tailLabel': re.get(variable_name[1] + 'Label', {'value': ''})['value'] if re[variable_name[1]]['type'] == 'uri' else re[variable_name[1]]['value'],
'tailDescription': re.get(variable_name[1] + 'Description', {'value': ''})['value'],
# 'tailuri': re[variable_name[1]]['value'] if re[variable_name[1]]['type'] == 'uri' else '',
# 'tailtype': 'uri' if re[variable_name[1]]['type'] == 'uri' else re[variable_name[1]].get('datatype', '')
})
if variable_name[2] in re:
response[idx].update({
'time': re.get(variable_name[2] + 'Label', {'value': ''})['value'] if re[variable_name[2]]['type'] == 'uri' else re[variable_name[2]]['value'],
})
else:
response[idx].update({
'time': "ALWAYS"
})
df = pd.DataFrame.from_dict(response)
return df.to_markdown()
except Exception as e:
print("Invalid option!\n", e)
return pd.DataFrame().to_markdown()
@tool.get("/find_entity_by_tail")
def find_entity_by_tail(input : str):
"""Find all <h, r> that has the relation <h, r, input>. It looks like viewing the reverse main page of the input entity. The result is a table.
"""
try:
sparqlIdx = -1
if input[0] == '#':
input = {'id': int(input[1:]), 'attr': 'tmp'}
elif input[0] == 'Q':
input = {'id': input, 'attr': 'wd'}
elif input[0] == 'P':
input = {'id': input, 'attr': 'wdt'}
elif input[0] == '@':
input = {'id': input[1:], 'attr': 'wds'}
else:
input = {'id': input, 'attr': 'val'}
sparql.find_entity_by_tail(input)
sparqlIdx = len(sparql.select_lst)-1
query, ids = sparql.give_str(sparqlIdx)
query += '\nLIMIT 2000'
ids = ['#' + str(id['id']) for id in ids]
result = getResult(query)
variable_name = [enc(sparql.select_lst[sparqlIdx].state[-1][0])
[1:], enc(sparql.select_lst[sparqlIdx].state[-1][1])[1:]]
response = [{} for i in range(0, len(result))]
for idx, re in enumerate(result):
response[idx].update(get_property_details(re[variable_name[1]]['value']) if re[variable_name[1]]['type'] == 'uri' else {
'relation': '',
'relationLabel': re[variable_name[1]]['value'],
'relationDescription': '',
# 'labelUri': ''
})
response[idx].update({
'head': re[variable_name[0]]['value'].split('/')[-1] if re[variable_name[0]]['type'] == 'uri' else '',
'headLabel': re.get(variable_name[0] + 'Label', {'value': ''})['value'] if re[variable_name[0]]['type'] == 'uri' else re[variable_name[0]]['value'],
'headDescription': re.get(variable_name[0] + 'Description', {'value': ''})['value'],
# 'headUri': re[variable_name[0]]['value'] if re[variable_name[0]]['type'] == 'uri' else '',
# 'headType': 'uri' if re[variable_name[0]]['type'] == 'uri' else re[variable_name[0]].get('datatype', '')
})
df = pd.DataFrame.from_dict(response)
return df.to_markdown()
except Exception as e:
print("Invalid option!\n", e)
return pd.DataFrame().to_markdown()
@tool.get("/get_entity_id")
def get_entity_id(input : str):
"""Search for all the entities that has the surface form as the input. For example, all the entities that are named ``Obama'', including either person, book, anything else.
"""
try:
result = requests.get("https://www.wikidata.org/w/api.php", params={
"type": "item",
"action": "wbsearchentities",
"language": "en",
"search": input,
"origin": "*",
"format": "json"
}).text
result = json.loads(result)["search"]
# print(result)
df = pd.DataFrame.from_dict(result)
for row in df.axes[1]:
if row != "id" and row != "label" and row != "description":
df.pop(row)
return df.to_markdown()
except Exception as e:
print("Invalid option!\n", e)
return pd.DataFrame().to_markdown()
@tool.get("/get_relation_id")
def get_relation_id(input : str):
"""Search for all the relations that has the surface form as the input. For example, all the relations that are named ``tax''.
"""
try:
result = requests.get("https://www.wikidata.org/w/api.php", params={
"type": "property",
"action": "wbsearchentities",
"language": "en",
"search": input,
"origin": "*",
"format": "json"
}).text
result = json.loads(result)["search"]
df = pd.DataFrame.from_dict(result)
for row in df.axes[1]:
if row != "id" and row != "label" and row != "description":
df.pop(row)
return df.to_markdown()
except Exception as e:
print("Invalid option!\n", e)
return pd.DataFrame().to_markdown()
@tool.get("/search_by_code")
def search_by_code(query : str):
"""After knowing the unique id of entity or relation, perform a sparql query. E.g.,
Select ?music\nWhere {{\nwd:Q00 wdt:P00 ?music.\n}} The entity label will be automatically retrieved."""
try:
query, basic_sel = convert_sparql_to_backend(query)
result = getResult(query)
for i in range(0, len(result)):
for sel in basic_sel:
if sel not in result[i]:
continue
if len(result[i][sel]['value']) < 4 or result[i][sel]['value'][0:4] != 'http':
continue
id = result[i][sel]['value'].split('/')[-1]
if type(id) == str and len(id) > 0 and id[0] == 'P':
result[i].update(
convert(get_property_details_with_name(result[i][sel]['value'], sel)))
df = pd.DataFrame.from_dict(result)
return df.to_markdown()
except Exception as e:
print("Invalid option!\n", e)
return pd.DataFrame().to_markdown()
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/kg/wikidata/api.py
|
from copy import deepcopy
from SPARQLWrapper import SPARQLWrapper, JSON
import csv
import regex as re
import os
DIRPATH = os.path.dirname(os.path.abspath(__file__))
# Dictionary to store all property labels and description
class PropertyDetails:
def __init__(self):
self.prop_details = dict()
with open(f'{DIRPATH}/property.csv', 'r', encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
for prop in reader:
self.prop_details[prop[0]] = [prop[1], prop[2]]
def get_details(self, prop_id):
return self.prop_details.get(prop_id, ['', ''])
sid_num = 0
propdetails = PropertyDetails()
def convert_sparql_to_backend(query):
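# Rewrite a user SPARQL query so that every selected variable also returns its rdfs:label and schema:description (via the wikibase label service), and cap the result at 200 rows.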
all_var_str = '[() ]\?[a-zA-Z0-9_-]+[() ]'
filter_str = r'\(.+\(.* (\?.+)\) [Aa][Ss].*\)'
sparql_split = query.split('\n')
select = sparql_split[0]
select += ' '
sel_list = re.findall(all_var_str, select, overlapped=True)
sel_list = [sel[1:-1] for sel in sel_list]
rm_list = re.findall(filter_str, select)
for sel in rm_list:
sel_list.remove(sel)
# print(sel_list)
added_sel_list = []
basic_sel_list = []
for sel in sel_list:
if len(sel) > 0 and sel[0] == '?':
basic_sel_list.append(sel)
added_sel_list.append(sel + 'Label')
added_sel_list.append(sel + 'Description')
if len(rm_list) == 0:
for sel in added_sel_list:
select += ' ' + sel
# print(select)
sparql_split[0] = select
service_pos = -1
query = '\n'.join(sparql_split)
for i in range(len(query)-1, -1, -1):
if query[i] == '}':
service_pos = i
break
query = query[:service_pos] + \
'SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }\n' + \
query[service_pos:] + '\nLIMIT 200'
basic_sel_list = [b[1:] for b in basic_sel_list]
return query, basic_sel_list
def get_property_details_with_name(url: str, name: str):
id = url.split('/')[-1]
checkConst = url.split('/')[4]
if len(id) > 0 and id[0] == '#':
return {
name: id,
name + 'Label': '',
name + 'Description': '',
# name + 'uri': ''
}
elif checkConst[0] == '"':
label = url.split('"')[1]
type = url.split('<')[-1]
type = type[0:len(type)-1]
return {
name: '',
name + 'Label': label,
name + 'Description': "",
# name + 'uri': '',
# 'type': type
}
prop = propdetails.get_details(id)
id = id.split('+')
if len(id) == 1:
return {
name: id[0],
name + 'Label': prop[0],
name + 'Description': prop[1],
# 'propuri': url
}
labels = [propdetails.get_details(id_)[0] for id_ in id]
return {
name: '+'.join(id),
name + 'Label': '+'.join(labels),
name + 'Description': '',
# name + 'uri': ''
}
def convert(dictt):
for key in dictt:
dictt[key] = {'value': dictt[key]}
return dictt
def getResult(query):
sparql = SPARQLWrapper('https://query.wikidata.org/sparql',
agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36')
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
result = sparql.query().convert()
return result['results']['bindings']
def get_details_sparql(id):
condition = 'wd:{} rdfs:label ?label. wd:{} schema:description ?description. FILTER(lang(?label) = "en" && lang(?description) = "en")'.format(
id, id)
query = 'SELECT ?label ?description \nWHERE\n{\n' + \
condition + '\n}\n LIMIT 1'
return query
# Get label & description of an entity
def get_entity_details(url: str):
id = url.split('/')[-1]
if len(id) > 0 and id[0] == '#':
return {
'id': id,
'label': None,
'description': None,
'uri': None
}
if not (len(id) > 0 and id[0] in ['P', 'Q']):
return {
'id': id,
'label': None,
'description': None,
'uri': None
}
if (url[0:4] != 'http'):
return {
'id': None,
'label': None,
'description': None,
'uri': None
}
if url[7:23] != "www.wikidata.org":
return {
'id': None,
'label': None,
'description': None,
'uri': url
}
result = getResult(get_details_sparql(id))
if len(result) == 0:
return {
'id': id,
'label': None,
'description': None,
'uri': url,
'type': ''
}
response = {
'id': id,
'label': result[0].get('label', {'value': ''})['value'],
'description': result[0].get('description', {'value': ''})['value'],
'uri': url,
'type': 'uri'
}
return response
# Get label & description of a property
def get_property_details(url: str):
id = url.split('/')[-1]
checkConst = url.split('/')[4]
if len(id) > 0 and id[0] == '#':
return {
'prop': id,
'propLabel': '',
'propDescription': '',
# 'propuri': ''
}
elif checkConst[0] == '"':
label = url.split('"')[1]
type = url.split('<')[-1]
type = type[0:len(type)-1]
return {
'prop': '',
'propLabel': label,
'propDescription': "",
# 'propuri': '',
# 'type': type
}
prop = propdetails.get_details(id)
id = id.split('+')
if len(id) == 1:
return {
'prop': id[0],
'propLabel': prop[0],
'propDescription': prop[1],
# 'propuri': url
}
labels = [propdetails.get_details(id_)[0] for id_ in id]
return {
'prop': '+'.join(id),
'propLabel': '+'.join(labels),
'propDescription': '',
# 'propuri': ''
}
def enc(i):
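# Encode a slot as SPARQL text: 'tmp' -> ?tmpN_, 'val' -> literal, 'sid' -> ?sid_N, other attrs -> prefixed terms; a 'wdt' property is expanded into a p:/ps: pair so its statement id can be selected.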
assert 'attr' in i and i['id'] != None
global sid_num
if i['attr'] == 'tmp':
return '?'+'tmp'+str(i['id'])+'_'
if i['attr'] == 'val':
return str(i['id'])
if i['attr'] == 'sid':
return '?'+'sid_'+str(i['id'])
if len(i['id'].split('|')) > 1:
Str = ''
for Id in i['id'].split('|'):
sid_num += 1
Str += i['attr']+':'+Id
Str += "|"
return Str[:-1]
if i['attr'] == 'wdt':
sid_num += 1
return 'p:{} ?sid_{}.\n?sid_{} ps:{}'.format(str(i['id']), sid_num, sid_num, str(i['id']))
return i['attr']+':'+str(i['id'])
class Slot2Sparql:
class selection:
def __init__(self):
self.str0 = "SELECT " # 搜索的目标字符串
self.str1 = ''
self.str2 = ''
self.select = [] # select后内容
self.select_sid = [] # 最新statementId
self.new_select = [] # count max min select 的tmp id
self.trip = [] # 下方的搜索字符串
self.tmp = [] # 临时变量
self.state = []
self.tail = [] # 尾部
self.find_tuple_match = {}
def give_str(self):
need_label = (len(self.str1) == 0)
str = self.str0
for s in self.select:
cur_enc = enc(s)
str += cur_enc
if need_label:
str += ' {}Label {}Description'.format(cur_enc, cur_enc)
if len(self.select) == 1:
str += self.str1
str += self.str2
str += ' '
for s in self.select_sid:
str += enc(s)
str += "\nWHERE\n{\n"
for s in self.trip:
str += s
if (str[-1] != '{'):
str += '.'
str += '\n'
if need_label:
str += 'SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }\n'
str += "}"
for s in self.tail:
str += '\n'
str += s
str += '\n'
return str
def set_select(self, sele):
self.str0 = "SELECT "
self.select = [sele]
self.str1 = ""
def clear_all(self):
self.str0 = "SELECT " # 搜索的目标字符串
self.str1 = ''
self.str2 = ''
self.select = [] # select后内容
self.trip = [] # 下方的搜索字符串
self.tmp = [] # 临时变量
self.state = []
self.tail = [] # 尾部
def getCount(self):
str = self.str0
str += '(COUNT('
for s in self.select:
cur_enc = enc(s)
str += cur_enc
if len(self.select) == 1:
str += self.str1
str += ' '
str += ') AS ?cnt)'
str += "\nWHERE\n{\n"
for s in self.trip:
str += s
str += '.'
str += '\n'
str += "}"
for s in self.tail:
str += '\n'
str += s
str += '\n'
return str
def __init__(self):
global sid_num
self.select_lst = []
self.num = 0
sid_num = 0
def clear_all(self):
self.select_lst = []
self.num = 0
def new_select_lst(self):
self.select_lst.append(self.selection())
def prev_select_lst(self, idx):
self.select_lst.append(deepcopy(self.select_lst[idx]))
def find_select_lst(self, tar):
assert tar['attr'] == 'tmp' and tar['id'] < self.num
if tar in self.select_lst[-1].tmp:
return
for i in range(len(self.select_lst) - 2, -1, -1):
if tar in self.select_lst[i].select:
self.select_lst[-1].trip += self.select_lst[i].trip # triple patterns for the WHERE clause
self.select_lst[-1].state += self.select_lst[i].state
self.select_lst[-1].tmp += self.select_lst[i].tmp
self.select_lst[-1].tail += self.select_lst[i].tail # trailing clauses
return
def head(self,):
pass
def body(self,):
pass
def find_tuple(self, tup):
self.new_select_lst()
target = []
for i in tup:
if tup[i]['attr'] == 'tmp' and tup[i]['id'] == None:
# allocate a new temporary variable
tup[i]['id'] = self.num
self.num += 1
target.append(tup[i])
self.select_lst[-1].find_tuple_match[i] = enc(tup[i])[1:]
self.select_lst[-1].tmp.append(tup[i])
elif tup[i]['attr'] == 'tmp':
assert tup[i]['id'] < self.num
self.find_select_lst(tup[i])
target.append(tup[i])
self.select_lst[-1].find_tuple_match[i] = enc(tup[i])[1:]
if target == []:
is_triplet_full = True
for i in tup:
if tup[i]['attr'] == 'tmp':
self.find_select_lst(tup[i])
target.append(tup[i])
self.select_lst[-1].find_tuple_match[i] = enc(tup[i])[1:]
break
else:
is_triplet_full = False
self.select_lst[-1].select = target
self.select_lst[-1].state.append([tup['x'], tup['y'], tup['z']])
if type(tup['y']['id']) == str:
y_id_splited = tup['y']['id'].split('+')
else:
y_id_splited = []
tmpXZ = [tup['x']]
for i in range(len(y_id_splited)-1):
tmpXZ.append({'attr': 'tmp', 'id': self.num})
self.num += 1
tmpXZ.append(tup['z'])
idx = 0
str1 = ''
if len(y_id_splited) != 0:
for tmpY in y_id_splited:
newY = {'attr': 'wdt', 'id': tmpY}
str1 += enc(tmpXZ[idx])
str1 += ' '
str1 += enc(newY)
str1 += ' '
str1 += enc(tmpXZ[idx+1])
str1 += '.\n'
idx += 1
else:
str1 += enc(tup['x'])
str1 += ' '
str1 += enc(tup['y'])
str1 += ' '
str1 += enc(tup['z'])
str1 += '.\n'
str1 = str1[:-2]
print(str1)
self.select_lst[-1].select_sid = [{'attr': 'sid', 'id': sid_num}]
self.select_lst[-1].trip.append(str1)
if is_triplet_full:
self.change_tmpidx(target[0])
def find_entity(self, ent1):
self.new_select_lst()
self.select_lst[-1].str0 += 'DISTINCT '
self.select_lst[-1].select = [{}, {}, {}]
innerSelect = [{}, {}]
for t in self.select_lst[-1].select:
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
self.select_lst[-1].tmp.append(t)
for t in innerSelect:
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
if ent1['attr'] == 'tmp':
self.find_select_lst(ent1)
# ent1 is the head of the triple
self.select_lst[-1].state.append(
[ent1, self.select_lst[-1].select[0], self.select_lst[-1].select[1], self.select_lst[-1].select[2]])
str1 = enc(ent1)
str1 += ' '
str1 += enc(self.select_lst[-1].select[0])
str1 += ' '
str1 += enc(self.select_lst[-1].select[1])
self.select_lst[-1].trip.append("{")
self.select_lst[-1].trip.append(str1)
self.select_lst[-1].trip.append("}\nUNION\n{")
str1 = enc(ent1)
str1 += ' '
str1 += enc(innerSelect[0])
str1 += ' '
str1 += enc(innerSelect[1])
self.select_lst[-1].trip.append(str1)
str1 = enc(innerSelect[1])
str1 += ' pq:P585 '
str1 += enc(self.select_lst[-1].select[2])
str1 += ';\n'
str1 += enc(self.select_lst[-1].select[0])
str1 += ' '
str1 += enc(self.select_lst[-1].select[1])
self.select_lst[-1].trip.append(str1)
self.select_lst[-1].trip.append("}")
if ent1['attr'] == 'wds':
str1 = 'FILTER(STRSTARTS ( STR ( {} ), "http://www.wikidata.org/prop/" ))'.format(
enc(self.select_lst[-1].select[0]))
else:
str1 = 'FILTER(STRSTARTS ( STR ( {} ), "http://www.wikidata.org/prop/direct/" ) || STRSTARTS ( STR ( {} ),"http://www.wikidata.org/prop/statement/" ))'.format(
enc(self.select_lst[-1].select[0]), enc(self.select_lst[-1].select[0]))
self.select_lst[-1].trip.append(str1)
def find_entity_by_tail(self, ent1):
self.new_select_lst()
self.select_lst[-1].str0 += 'DISTINCT '
self.select_lst[-1].select = [{}, {}]
for t in self.select_lst[-1].select:
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
self.select_lst[-1].tmp.append(t)
if ent1['attr'] == 'tmp':
self.find_select_lst(ent1)
# ent1 is the tail of the triple
self.select_lst[-1].state.append(
[self.select_lst[-1].select[0], self.select_lst[-1].select[1], ent1])
str1 = enc(self.select_lst[-1].select[0])
str1 += ' '
str1 += enc(self.select_lst[-1].select[1])
str1 += ' '
str1 += enc(ent1)
self.select_lst[-1].trip.append(str1)
str1 = 'FILTER(STRSTARTS ( STR ( {} ), "http://www.wikidata.org/entity/Q" ))'.format(
enc(self.select_lst[-1].select[0]))
self.select_lst[-1].trip.append(str1)
str1 = 'FILTER(STRSTARTS ( STR ( {} ), "http://www.wikidata.org/prop/" ))'.format(
enc(self.select_lst[-1].select[1]))
self.select_lst[-1].trip.append(str1)
def find_entity_by_relation(self, ent1):
self.new_select_lst()
self.select_lst[-1].str0 += 'DISTINCT '
self.select_lst[-1].select = [{}, {}]
for t in self.select_lst[-1].select:
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
self.select_lst[-1].tmp.append(t)
if ent1['attr'] == 'tmp':
self.find_select_lst(ent1)
# ent1 is the relation of the triple
self.select_lst[-1].state.append(
[self.select_lst[-1].select[0], self.select_lst[-1].select[1], ent1])
str1 = enc(self.select_lst[-1].select[0])
str1 += ' '
str1 += enc(ent1)
str1 += ' '
str1 += enc(self.select_lst[-1].select[1])
self.select_lst[-1].trip.append(str1)
str1 = 'FILTER(STRSTARTS ( STR ( {} ), "http://www.wikidata.org/entity/Q" ))'.format(
enc(self.select_lst[-1].select[0]))
self.select_lst[-1].trip.append(str1)
def binary_operation(self, ent1, op, ent2):
if op in ['>', '<', '=', '!=', '>=', '<=']:
self.new_select_lst()
assert ent1['attr'] == 'tmp'
self.find_select_lst(ent1)
# express the comparison with a FILTER clause
str1 = 'FILTER ('
str1 += enc(ent1)
str1 += ' '
str1 += op
str1 += ' '
str1 += enc(ent2)
str1 += ')'
self.select_lst[-1].trip.append(str1)
self.select_lst[-1].select = [ent1]
self.change_tmpidx(ent1)
if ent2['attr'] == 'tmp':
self.select_lst[-1].select.append(ent2)
elif op in ['+', '-', '*', '/']:
self.new_select_lst()
if ent1['attr'] == 'tmp':
self.find_select_lst(ent1)
if ent2['attr'] == 'tmp':
self.find_select_lst(ent2)
# bind the result to a new temporary variable, e.g.
# BIND(?tmpxx / 365.2425 AS ?tmpxx).
t = {}
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
self.select_lst[-1].select = [t]
self.select_lst[-1].tmp.append(t)
str1 = 'BIND ('
str1 += enc(ent1)
str1 += ' '
str1 += op
str1 += ' '
str1 += enc(ent2)
str1 += ' AS '
str1 += enc(t)
str1 += ').'
self.select_lst[-1].trip.append(str1)
elif op in ['&&', '||', '~']:
self.new_select_lst()
assert ent1['attr'] == ent2['attr'] == 'tmp'
self.select_lst[-1].trip.append('{')
self.find_select_lst(ent1)
if op == '&&':
pass
elif op == '||':
self.select_lst[-1].trip.append('}\nUNION\n{')
else:
self.select_lst[-1].trip.append('}\nMINUS\n{')
self.find_select_lst(ent2)
self.select_lst[-1].trip.append('}')
# bind the result to a new temporary variable, e.g.
# BIND(?tmpxx / 365.2425 AS ?tmpxx).
t = {}
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
tmp = []
self.select_lst[-1].select = [t]
self.select_lst[-1].tmp.append(t)
self.select_lst[-1].tmp.remove(ent1)
self.select_lst[-1].tmp.remove(ent2)
for line in self.select_lst[-1].trip:
nline = line.replace(enc(ent1), enc(t))
nline = nline.replace(enc(ent2), enc(t))
tmp.append(nline)
self.select_lst[-1].trip = tmp
for line in self.select_lst[-1].state:
for i in line:
if i == ent1 or i == ent2:
i = t
tmp = []
for line in self.select_lst[-1].tail:
nline = line.replace(enc(ent1), enc(t))
nline = nline.replace(enc(ent2), enc(t))
tmp.append(nline)
self.select_lst[-1].tail = tmp
def unitary_operation(self, ent, op, last_sparql_idx):
if op in ['ORDER', 'GROUP (ASC)', 'GROUP (DESC)']:
self.new_select_lst()
self.find_select_lst(ent)
self.select_lst[-1].select = [ent]
str1 = op.split(' ')[0] + ' BY '
str1 += enc(ent)
if 'GROUP' in op.split(' '):
str1 += ' {}Label {}Description'.format(enc(ent), enc(ent))
if op.split(' ')[-1] == '(DESC)':
str1 += '\nORDER BY DESC(?cnt)'
else:
str1 += '\nORDER BY ?cnt'
self.select_lst[-1].tail.append(str1)
self.change_tmpidx(ent)
if 'GROUP' in op.split(' '):
self.select_lst[-1].str2 = ' (COUNT({}) AS ?cnt) '.format(
enc(self.select_lst[-1].select[0]))
elif op in ['LIMIT', 'OFFSET']:
self.prev_select_lst(last_sparql_idx)
str1 = op + ' '
str1 += enc(ent)
self.select_lst[-1].tail.append(str1)
self.change_tmpidx(self.select_lst[-1].select[0])
self.select_lst[-1].new_select = self.select_lst[-1].select
elif op in ['DISTINCT', 'REDUCED']:
self.new_select_lst()
self.find_select_lst(ent)
self.select_lst[-1].select = [ent]
self.select_lst[-1].str0 += op
self.select_lst[-1].str0 += ' '
elif op in ['MIN', 'MAX', 'AVG', 'SUM', 'COUNT', 'SAMPLE']:
self.new_select_lst()
self.find_select_lst(ent)
t = {}
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
self.select_lst[-1].new_select = [t]
self.select_lst[-1].tmp.append(t)
self.select_lst[-1].select = [ent]
self.select_lst[-1].str0 += '('
self.select_lst[-1].str0 += op
self.select_lst[-1].str0 += '('
self.select_lst[-1].str1 += ') AS '
self.select_lst[-1].str1 += enc(t)
self.select_lst[-1].str1 += ')'
def give_str(self, sparqlIdx=-1):
return self.select_lst[sparqlIdx].give_str(), self.select_lst[sparqlIdx].select
def give_tmp(self, sparqlIdx=-1):
return self.select_lst[sparqlIdx].tmp
def change_tmpidx(self, ent1, sparqlIdx=-1):
# replace ent1 with a fresh temporary variable throughout this selection
t = {}
t['attr'] = 'tmp'
t['id'] = self.num
self.num += 1
tmp = []
self.select_lst[sparqlIdx].select = [t]
self.select_lst[sparqlIdx].tmp.append(t)
self.select_lst[sparqlIdx].tmp.remove(ent1)
for line in self.select_lst[sparqlIdx].trip:
nline = line.replace(enc(ent1), enc(t))
tmp.append(nline)
self.select_lst[sparqlIdx].trip = tmp
for line in self.select_lst[sparqlIdx].state:
for i in line:
if i == ent1:
i = t
tmp = []
for line in self.select_lst[sparqlIdx].tail:
nline = line.replace(enc(ent1), enc(t))
tmp.append(nline)
self.select_lst[sparqlIdx].tail = tmp
self.select_lst[sparqlIdx].str2 = self.select_lst[sparqlIdx].str2.replace(
enc(ent1), enc(t))
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/kg/wikidata/utils.py
|
from ..registry import register
@register("weather")
def weather():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/weather/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'weather', "http://127.0.0.1:8079/tools/weather/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("What's the weather in London today?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/weather/test.py
|
import requests
import json
from ..tool import Tool
import os
def build_tool(config) -> Tool:
tool = Tool(
"Weather Info",
"Look up weather information",
name_for_model="Weather",
description_for_model="Plugin for look up weather information",
logo_url="https://cdn.weatherapi.com/v4/images/weatherapi_logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
KEY = os.environ.get('WEATHER_API_KEYS', '')
if KEY == '':
raise RuntimeError("WEATHER_API_KEY not provided, please register one following https://www.weatherapi.com/docs/ and add it to environment variables.")
URL_CURRENT_WEATHER= "http://api.weatherapi.com/v1/current.json"
URL_FORECAST_WEATHER = "http://api.weatherapi.com/v1/forecast.json"
@tool.get("/get_weather_today")
def get_weather_today(location : str):
'''Get the current weather for a location.
'''
param = {
"key": KEY,
"q": location
}
res_completion = requests.get(URL_CURRENT_WEATHER, params=param)
data = json.loads(res_completion.text.strip())
output = {}
output["overall"]= f"{data['current']['condition']['text']},\n"
output["name"]= f"{data['location']['name']},\n"
output["region"]= f"{data['location']['region']},\n"
output["country"]= f"{data['location']['country']},\n"
output["localtime"]= f"{data['location']['localtime']},\n"
output["temperature"]= f"{data['current']['temp_c']}(C), {data['current']['temp_f']}(F),\n"
output["percipitation"]= f"{data['current']['precip_mm']}(mm), {data['current']['precip_in']}(inch),\n"
output["pressure"]= f"{data['current']['pressure_mb']}(milibar),\n"
output["humidity"]= f"{data['current']['humidity']},\n"
output["cloud"]= f"{data['current']['cloud']},\n"
output["body temperature"]= f"{data['current']['feelslike_c']}(C), {data['current']['feelslike_f']}(F),\n"
output["wind speed"]= f"{data['current']['gust_kph']}(kph), {data['current']['gust_mph']}(mph),\n"
output["visibility"]= f"{data['current']['vis_km']}(km), {data['current']['vis_miles']}(miles),\n"
output["UV index"]= f"{data['current']['uv']},\n"
text_output = f"Today's weather report for {data['location']['name']} is:\n"+"".join([f"{key}: {output[key]}" for key in output.keys()])
return text_output
@tool.get("/forecast_weather")
def forecast_weather(location : str, days : str):
'''Forecast weather in the upcoming days. Args: - location: str - days: int
'''
param = {
"key": KEY,
"q": location,
"days": int(days),
}
res_completion = requests.get(URL_FORECAST_WEATHER, params=param)
res_completion = json.loads(res_completion.text.strip())
MAX_DAYS = 14
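# The forecast response covers at most MAX_DAYS days; clamp the requested day into that range.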
res_completion = res_completion["forecast"]["forecastday"][int(days)-1 if int(days) < MAX_DAYS else MAX_DAYS-1]
output_dict = {}
for k, v in res_completion["day"].items():
output_dict[k] = v
for k, v in res_completion["astro"].items():
output_dict[k] = v
output = {}
output["over all weather"] = f"{output_dict['condition']['text']},\n"
output["max temperature"] = f"{output_dict['maxtemp_c']}(C), {output_dict['maxtemp_f']}(F),\n"
output["min temperature"] = f"{output_dict['mintemp_c']}(C), {output_dict['mintemp_f']}(F),\n"
output["average temperature"] = f"{output_dict['avgtemp_c']}(C), {output_dict['avgtemp_f']}(F),\n"
output["max wind speed"] = f"{output_dict['maxwind_kph']}(kph), {output_dict['maxwind_mph']}(mph),\n"
output["total precipitation"] = f"{output_dict['totalprecip_mm']}(mm), {output_dict['totalprecip_in']}(inch),\n"
output["will rain today"] = f"{output_dict['daily_will_it_rain']},\n"
output["chance of rain"] = f"{output_dict['daily_chance_of_rain']},\n"
output["total snow"] = f"{output_dict['totalsnow_cm']}(cm),\n"
output["will snow today"] = f"{output_dict['daily_will_it_snow']},\n"
output["chance of snow"] = f"{output_dict['daily_chance_of_snow']},\n"
output["average visibility"] = f"{output_dict['avgvis_km']}(km), {output_dict['avgvis_miles']}(miles),\n"
output["average humidity"] = f"{output_dict['avghumidity']},\n"
output["UV index"] = f"{output_dict['uv']},\n"
output["sunrise time"] = f"{output_dict['sunrise']},\n"
output["sunset time"] = f"{output_dict['sunset']},\n"
output["moonrise time"] = f"{output_dict['moonrise']},\n"
output["moonset time"] = f"{output_dict['moonset']},\n"
text_output = f"The weather forecast for {param['q']} at {param['days']} days later is: \n"+"".join([f"{key}: {output[key]}" for key in output.keys()])
return text_output
return tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/weather/api.py
|
from ..registry import register
@register("stock")
def stock():
from .api import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/stock/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'Stock', "http://127.0.0.1:8079/tools/stock/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("What's the close price of Apple stock at 2022/02/21?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/stock/test.py
|
import requests
import json
import os
from ..tool import Tool
alphavantage_key = os.getenv("ALPHA_VANTAGE_KEY", None)
def build_tool(config) -> Tool:
tool = Tool(
"Stock Info",
"Look up stock information",
name_for_model="Stock",
description_for_model="Plugin for look up stock information",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com"
)
functions = ['TIME_SERIES_INTRADAY', 'TIME_SERIES_INTRADAY_EXTENDED','TIME_SERIES_DAILY', 'TIME_SERIES_DAILY_ADJUSTED']
types = ['open', 'close', 'high', 'low']
KEY = config["key"]
if alphavantage_key is not None:
KEY = alphavantage_key
BASE_URL = 'https://www.alphavantage.co/query?'
def get_json_data(function, symbol, interval = '5min', adjusted='true', outputsize='compact', datatype='json'):
url = BASE_URL + 'function=' + function + '&symbol=' + symbol + '&apikey=' + KEY
r = requests.get(url)
data = json.loads(r.text)
return data
@tool.get("/get_today_date")
def get_today_date():
'''Get today's date
'''
from datetime import date
today = date.today()
return today.strftime("%Y-%m-%d")
@tool.get('/get_daily_prices')
def get_daily_prices(symbol : str, date : str = ''):
'''Get the stock price of an entity in the stock market. The date should be passed as 'yyyy-mm-dd'.
'''
if "," in symbol:
symbol, date = symbol.split(",")
if date.strip() == "":
return "Please specify a date and try again. You can you get_today_date to up-to-date time information."
data = get_json_data('TIME_SERIES_DAILY_ADJUSTED', symbol)
date = date.strip()
time_series = data["Time Series (Daily)"]
final_time = ''
open_price = high_price = low_price = close_price = None
# Find the most recent trading day on or before the queried date
# (the time series is ordered from the newest date to the oldest).
for timestamp, daily_data in time_series.items():
if timestamp <= date:
final_time = timestamp
open_price = daily_data["1. open"]
high_price = daily_data["2. high"]
low_price = daily_data["3. low"]
close_price = daily_data["4. close"]
break
if not final_time:
return f"No daily price data found for {symbol} on or before {date}."
return {'open':open_price, 'close':close_price, 'high':high_price, 'low':low_price, 'symbol':symbol, 'date':final_time}
@tool.get('/get_open_info')
def get_open_info(region : str = 'United States'):
'''Get information about whether the stock market in the given region is currently open.
'''
url = 'https://www.alphavantage.co/query?function=MARKET_STATUS&apikey=' + KEY
r = requests.get(url)
data = json.loads(r.text)
for item in data['markets']:
if item['region'] == region:
return item['current_status']
return 'Market status for the given region not found.'
@tool.get('/get_exchange_rate')
def get_exchange_rate(from_currency : str = 'USD', to_currency : str = 'BTC'):
'''This API returns the realtime exchange rate for a pair of digital currency (e.g., Bitcoin) and physical currency (e.g., USD).
'''
url = 'https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency='+ from_currency + '&to_currency='+ to_currency + '&apikey=' + KEY
r = requests.get(url)
data = json.loads(r.text)
try:
rate = data['Realtime Currency Exchange Rate']['5. Exchange Rate']
return rate
except (KeyError, TypeError):
return 'Error: exchange rate not found in the response. Please check the currency codes and try again.'
return tool
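To make the parsing in get_daily_prices easier to follow, here is a trimmed, purely illustrative sketch of the JSON shape returned by the TIME_SERIES_DAILY_ADJUSTED endpoint; the dates and prices below are made up and only show the fields the code reads.
# Illustrative (made-up) response shape consumed by get_daily_prices above.
sample_response = {
    "Meta Data": {"2. Symbol": "AAPL"},
    "Time Series (Daily)": {  # ordered from newest to oldest
        "2022-02-22": {"1. open": "164.98", "2. high": "166.69",
                       "3. low": "162.15", "4. close": "164.32"},
        "2022-02-18": {"1. open": "169.82", "2. high": "170.54",
                       "3. low": "166.19", "4. close": "167.30"},
    },
}
# get_daily_prices walks these keys and returns the first trading day on or
# before the requested date, e.g. a query for 2022-02-21 falls back to 2022-02-18.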
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/stock/api.py
|
from ..registry import register
@register("chemical-prop")
def chemical_prop():
from .prop import build_tool
return build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/chemical/__init__.py
|
from .api import build_tool
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/chemical/prop/__init__.py
|
from bmtools.agent.singletool import load_single_tools, STQuestionAnswerer
tool_name, tool_url = 'chemical-prop', "http://127.0.0.1:8079/tools/chemical-prop/"
tools_name, tools_config = load_single_tools(tool_name, tool_url)
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("How many benzene rings are there in 9H-Carbazole-3-carboxaldehyde?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/chemical/prop/test.py
|
import requests
from pydantic import BaseModel
import numpy as np
from bs4 import BeautifulSoup
import json
from ...tool import Tool
from typing import List, Optional, Union
class GetNameResponse(BaseModel):
"""name list"""
names: List[str]
class GetStructureResponse(BaseModel):
"""structure list"""
state : int
content : Optional[str] = None
class GetIDResponse(BaseModel):
state : int
content : Union[str, List[str]]
def build_tool(config) -> Tool:
tool = Tool(
"Chemical Property Plugin",
description="looking up a chemical's property",
name_for_model="Chemical Property",
description_for_model="Plugin for looking up a chemical's property using a chemical knowledge base. All input should be a json like {'input': 'some input'}. Please use the provided questions and search step by step.",
logo_url="https://your-app-url.com/.well-known/logo.png",
contact_email="hello@contact.com",
legal_info_url="hello@legal.com",
)
QRY = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/'
@tool.get("/get_name")
def get_name( cid: str ):
"""prints the possible 3 synonyms of the queried compound ID"""
html_doc = requests.get(QRY+'cid/'+str(cid)+'/synonyms/XML').text
soup = BeautifulSoup(html_doc,"html.parser",from_encoding="utf-8")
syns = soup.find_all('synonym')
ans = []
kk = 3
for syn in syns[:kk]:
ans.append(syn.text)
js = {'names':ans}
return js
@tool.get("/get_allname")
def get_allname( cid: str ):
"""prints all the possible synonyms (might be too many, use this function carefully).
"""
html_doc = requests.get(QRY+'cid/'+str(cid)+'/synonyms/XML').text
soup = BeautifulSoup(html_doc,"html.parser",from_encoding="utf-8")
syns = soup.find_all('synonym')
ans = []
for syn in syns:
ans.append(syn.text)
js = {'names':ans}
return js
@tool.get("/get_id_by_struct")
def get_id_by_struct(smiles : str):
"""prints the ID of the queried compound SMILES. This should only be used if smiles is provided or retrieved in the previous step. The input should not be a string, but a SMILES formula.
"""
html_doc = requests.get(QRY+'smiles/'+smiles+'/cids/XML').text
soup = BeautifulSoup(html_doc,"html.parser",from_encoding="utf-8")
cids = soup.find_all('cid')
if cids is not None:
if len(cids)==1:
ans = cids[0].text
js = {'state': 'matched', 'content': ans}
return js
js = {'state': 'no result'}
return js
@tool.get("/get_id")
def get_id(name : str):
"""prints the ID of the queried compound name, and prints the possible 5 names if the queried name can not been precisely matched,
"""
html_doc = requests.get(QRY+'name/'+name+'/cids/XML').text
soup = BeautifulSoup(html_doc,"html.parser",from_encoding="utf-8")
cids = soup.find_all('cid')
if cids is not None:
if len(cids)>0:
ans = cids[0].text
js = {'state':'precise', 'content':ans}
return js
html_doc = requests.get(QRY+'name/'+name+'/cids/XML?name_type=word').text
soup = BeautifulSoup(html_doc,"html.parser",from_encoding="utf-8")
cids = soup.find_all('cid')
if len(cids) > 0:
if name in get_name(cids[0].text)['names']:
ans = cids[0].text
js = {'state':'precise', 'content':ans}
return js
ans = []
seq = np.arange(len(cids))
np.random.shuffle(seq)
for sq in seq[:5]:
cid = cids[sq]
nms = get_name(cid.text)['names']
ans.append(nms)
js = {'state':'not precise', 'content':ans}
print(js)
return js
@tool.get("/get_prop")
def get_prop(cid : str):
"""prints the properties of the queried compound ID
"""
html_doc = requests.get(QRY+'cid/'+cid+'/property/MolecularFormula,MolecularWeight,CanonicalSMILES,IsomericSMILES,IUPACName,XLogP,ExactMass,MonoisotopicMass,TPSA,Complexity,Charge,HBondDonorCount,HBondAcceptorCount,RotatableBondCount,HeavyAtomCount,CovalentUnitCount/json').text
js = json.loads(html_doc)['PropertyTable']['Properties'][0]
return js
return tool
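As a standalone illustration of the PubChem PUG REST flow that the endpoints above wrap (compound name to CID, then CID to properties), here is a minimal sketch; the compound name is only an example and error handling is omitted.
# Minimal sketch of the PubChem lookup flow used above (name -> CID -> properties).
import json
import requests
from bs4 import BeautifulSoup

QRY = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/'

# 1) Resolve a compound name to a CID (same XML endpoint as get_id above).
xml = requests.get(QRY + 'name/aspirin/cids/XML').text
cid = BeautifulSoup(xml, "html.parser").find('cid').text

# 2) Fetch a couple of properties for that CID (a subset of get_prop above).
doc = requests.get(QRY + 'cid/' + cid + '/property/MolecularFormula,MolecularWeight/json').text
props = json.loads(doc)['PropertyTable']['Properties'][0]
print(cid, props)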
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/tools/chemical/prop/api.py
|
# coding=utf-8
# Copyright 2020 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# BMTools copied from Huggingface Transformers
""" Logging utilities."""
import logging
import os
import sys
import threading
from logging import CRITICAL # NOQA
from logging import DEBUG # NOQA
from logging import ERROR # NOQA
from logging import FATAL # NOQA
from logging import INFO # NOQA
from logging import NOTSET # NOQA
from logging import WARN # NOQA
from logging import WARNING # NOQA
from typing import Optional
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.INFO
def _get_default_logging_level():
"""
If BMTOOLS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is
not - fall back to ``_default_log_level``
"""
env_level_str = os.getenv("BMTOOLS_VERBOSITY", None)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option BMTOOLS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys()) }"
)
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> logging.Logger:
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
formatter = logging.Formatter(
"\033[1;31m[%(levelname)s|(BMTools)%(module)s:%(lineno)d]%(asctime)s >> \033[0m %(message)s")
_default_handler.setFormatter(formatter)
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
library_root_logger = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler)
library_root_logger.setLevel(logging.NOTSET)
_default_handler = None
def get_log_levels_dict():
return log_levels
def get_verbosity() -> int:
"""
Return the current level for the BMTools root logger as an int.
Returns:
:obj:`int`: The logging level.
<Tip>
BMTools has the following logging levels:
- 50: ``bmtools.utils.logging.CRITICAL`` or ``bmtools.utils.logging.FATAL``
- 40: ``bmtools.utils.logging.ERROR``
- 30: ``bmtools.utils.logging.WARNING`` or ``bmtools.utils.logging.WARN``
- 20: ``bmtools.utils.logging.INFO``
- 10: ``bmtools.utils.logging.DEBUG``
</Tip>"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
"""
Set the verbosity level for the BMTools root logger.
Args:
verbosity (:obj:`int`):
Logging level, e.g., one of:
- ``bmtools.utils.logging.CRITICAL`` or ``bmtools.utils.logging.FATAL``
- ``bmtools.utils.logging.ERROR``
- ``bmtools.utils.logging.WARNING`` or ``bmtools.utils.logging.WARN``
- ``bmtools.utils.logging.INFO``
- ``bmtools.utils.logging.DEBUG``
"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
"""Set the verbosity to the ``INFO`` level."""
return set_verbosity(INFO)
def set_verbosity_warning():
"""Set the verbosity to the ``WARNING`` level."""
return set_verbosity(WARNING)
def set_verbosity_debug():
"""Set the verbosity to the ``DEBUG`` level."""
return set_verbosity(DEBUG)
def set_verbosity_error():
"""Set the verbosity to the ``ERROR`` level."""
return set_verbosity(ERROR)
def disable_default_handler() -> None:
"""Disable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
"""Enable the default handler of the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
"""adds a handler to the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
"""removes given handler from the HuggingFace Transformers's root logger."""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
"""
Disable propagation of the library log outputs. Note that log propagation is disabled by default.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = False
def enable_propagation() -> None:
"""
Enable propagation of the library log outputs. Please disable the BMTools default handler to
prevent double logging if the root logger has been configured.
"""
_configure_library_root_logger()
_get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
"""
Enable explicit formatting for every BMTools logger. The explicit formatter is as follows:
```
[LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
```
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
handler.setFormatter(formatter)
def reset_format() -> None:
"""
Resets the formatting for BMTools loggers.
All handlers currently bound to the root logger are affected by this method.
"""
handlers = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
"""
This method is identical to ``logger.warning()``, but if env var TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set, this
warning will not be printed
"""
no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
if no_advisory_warnings:
return
self.warning(*args, **kwargs)
logging.Logger.warning_advice = warning_advice
def get_logger(name: Optional[str] = None, verbosity='info') -> logging.Logger:
"""
Return a logger with the specified name.
This function is not supposed to be directly accessed unless you are writing a custom bmtools module.
"""
if name is None:
name = _get_library_name()
_configure_library_root_logger()
logger = logging.getLogger(name)
logger.setLevel(log_levels[verbosity])
return logger
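A short usage sketch for the logging helpers defined above; in practice the package also re-exports get_logger at the top level, as the agent modules below do with "from bmtools import get_logger".
# Usage sketch for the helpers above. The default root level can also be set via the
# BMTOOLS_VERBOSITY environment variable (debug/info/warning/error/critical).
from bmtools.utils import logging as bm_logging

bm_logging.set_verbosity_info()                              # root logger level
logger = bm_logging.get_logger(__name__, verbosity='debug')  # per-logger level
logger.info("normal message")
logger.warning_advice("hidden when TRANSFORMERS_NO_ADVISORY_WARNINGS=1 is set")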
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/utils/logging.py
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/utils/__init__.py
|
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/__init__.py
|
|
from langchain.llms import OpenAI
from langchain import OpenAI, LLMChain, PromptTemplate, SerpAPIWrapper
from langchain.agents import ZeroShotAgent, AgentExecutor, initialize_agent, Tool
import importlib
import json
import os
import requests
import yaml
from bmtools.agent.apitool import RequestTool
from bmtools.agent.executor import Executor, AgentExecutorWithTranslation
from bmtools import get_logger
from bmtools.agent.BabyagiTools import BabyAGI
# from bmtools.models.customllm import CustomLLM
logger = get_logger(__name__)
def import_all_apis(tool_json):
'''Import all APIs exposed by a tool.
'''
doc_url = tool_json['api']['url']
response = requests.get(doc_url)
logger.info("Doc string URL: {}".format(doc_url))
if doc_url.endswith('yaml') or doc_url.endswith('yml'):
plugin = yaml.safe_load(response.text)
else:
plugin = json.loads(response.text)
server_url = plugin['servers'][0]['url']
if server_url.startswith("/"):
server_url = "http://127.0.0.1:8079" + server_url
logger.info("server_url {}".format(server_url))
all_apis = []
for key in plugin['paths']:
value = plugin['paths'][key]
api = RequestTool(root_url=server_url, func_url=key, method='get', request_info=value)
all_apis.append(api)
return all_apis
def load_single_tools(tool_name, tool_url):
# tool_name, tool_url = "datasette", "https://datasette.io/"
# tool_name, tool_url = "klarna", "https://www.klarna.com/"
# tool_name, tool_url = 'chemical-prop', "http://127.0.0.1:8079/tools/chemical-prop/"
# tool_name, tool_url = 'douban-film', "http://127.0.0.1:8079/tools/douban-film/"
# tool_name, tool_url = 'weather', "http://127.0.0.1:8079/tools/weather/"
# tool_name, tool_url = 'wikipedia', "http://127.0.0.1:8079/tools/wikipedia/"
# tool_name, tool_url = 'wolframalpha', "http://127.0.0.1:8079/tools/wolframalpha/"
# tool_name, tool_url = 'klarna', "https://www.klarna.com/"
get_url = tool_url +".well-known/ai-plugin.json"
response = requests.get(get_url)
if response.status_code == 200:
tool_config_json = response.json()
else:
raise RuntimeError("Your URL of the tool is invalid.")
return tool_name, tool_config_json
class STQuestionAnswerer:
def __init__(self, openai_api_key = "", stream_output=False, llm='ChatGPT'):
if len(openai_api_key) < 3: # not valid key (TODO: more rigorous checking)
openai_api_key = os.environ.get('OPENAI_API_KEY')
self.openai_api_key = openai_api_key
self.llm_model = llm
self.set_openai_api_key(openai_api_key)
self.stream_output = stream_output
def set_openai_api_key(self, key):
logger.info("Using {}".format(self.llm_model))
if self.llm_model == "GPT-3.5":
self.llm = OpenAI(temperature=0.0, openai_api_key=key) # use text-davinci
elif self.llm_model == "ChatGPT":
self.llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=key) # use chatgpt
else:
raise RuntimeError("Your model is not available.")
def load_tools(self, name, meta_info, prompt_type="react-with-tool-description", return_intermediate_steps=True):
self.all_tools_map = {}
self.all_tools_map[name] = import_all_apis(meta_info)
logger.info("Tool [{}] has the following apis: {}".format(name, self.all_tools_map[name]))
if prompt_type == "zero-shot-react-description":
subagent = initialize_agent(self.all_tools_map[name], self.llm, agent="zero-shot-react-description", verbose=True, return_intermediate_steps=return_intermediate_steps)
elif prompt_type == "react-with-tool-description":
description_for_model = meta_info['description_for_model'].replace("{", "{{").replace("}", "}}").strip()
prefix = f"""Answer the following questions as best you can. General instructions are: {description_for_model}. Specifically, you have access to the following APIs:"""
suffix = """Begin! Remember: (1) Follow the format, i.e,\nThought:\nAction:\nAction Input:\nObservation:\nFinal Answer:\n (2) Provide as much as useful information in your Final Answer. (3) YOU MUST INCLUDE all relevant IMAGES in your Final Answer using format , and include relevant links. (3) Do not make up anything, and if your Observation has no link, DO NOT hallucihate one. (4) If you have enough information, please use \nThought: I have got enough information\nFinal Answer: \n\nQuestion: {input}\n{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
self.all_tools_map[name],
prefix=prefix,
suffix=suffix,
input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
logger.info("Full prompt template: {}".format(prompt.template))
tool_names = [tool.name for tool in self.all_tools_map[name] ]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
if self.stream_output:
agent_executor = Executor.from_agent_and_tools(agent=agent, tools=self.all_tools_map[name] , verbose=True, return_intermediate_steps=return_intermediate_steps)
else:
agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(agent=agent, tools=self.all_tools_map[name], verbose=True, return_intermediate_steps=return_intermediate_steps)
return agent_executor
elif prompt_type == "babyagi":
# customllm = CustomLLM()
tool_str = "; ".join([t.name for t in self.all_tools_map[name]] + ["TODO"])
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\n You have access to the following APIs:"""
suffix = """YOUR CONSTRAINTS: (1) When saying anything, YOU MUST follow this format:
\nThought:\nAction:\nAction Input: \n or \nThought:\nFinal Answer:\n (2) Do not make up anything, and if your Observation has no link, DO NOT hallucinate one. (3) The Action must be one of the following: """ + tool_str + """\nQuestion: {task}\n Agent scratchpad (history actions): {agent_scratchpad}."""
todo_prompt = PromptTemplate.from_template("You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}")
todo_chain = LLMChain(llm=self.llm, prompt=todo_prompt)
# todo_chain = LLMChain(llm=customllm, prompt=todo_prompt)
# search = SerpAPIWrapper()
# tools = [
# Tool(
# name="Search",
# func=search.run,
# description="useful for when you need to answer questions about current events",
# ),
# Tool(
# name="TODO",
# func=todo_chain.run,
# description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!",
# ),
# ]
# self.all_tools_map[name] = tools
todo_tool = Tool(
name = "TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!"
)
self.all_tools_map[name].append(todo_tool)
prompt = ZeroShotAgent.create_prompt(
self.all_tools_map[name],
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context","agent_scratchpad"]
)
logger.info("Full prompt template: {}".format(prompt.template))
# specify the maximum number of iterations you want babyAGI to perform
max_iterations = 10
baby_agi = BabyAGI.from_llm(
llm=self.llm,
# llm=customllm,
prompt=prompt,
verbose=False,
tools=self.all_tools_map[name],
stream_output=self.stream_output,
return_intermediate_steps=return_intermediate_steps,
max_iterations=max_iterations,
)
return baby_agi
if __name__ == "__main__":
tools_name, tools_config = load_single_tools('wolframalpha', "http://127.0.0.1:8079/tools/wolframalpha/")
print(tools_name, tools_config)
qa = STQuestionAnswerer()
agent = qa.load_tools(tools_name, tools_config)
agent("Calc integral of sin(x)+2x^2+3x+1 from 0 to 1")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/singletool.py
|
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import LLMChain, OpenAI, PromptTemplate, SerpAPIWrapper
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import BaseLLM
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
from langchain.chains.base import Chain
from langchain.vectorstores import FAISS
import faiss
from langchain.docstore import InMemoryDocstore
from langchain.agents import ZeroShotAgent, Tool, AgentExecutor
from bmtools.agent.executor import Executor, AgentExecutorWithTranslation
class ContextAwareAgent(ZeroShotAgent):
def get_full_inputs(
self, intermediate_steps, **kwargs: Any
) -> Dict[str, Any]:
"""Create the full inputs for the LLMChain from intermediate steps."""
thoughts = self._construct_scratchpad(intermediate_steps)
new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
full_inputs = {**kwargs, **new_inputs}
return full_inputs
def _construct_scratchpad(
self, intermediate_steps):
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts = ""
# only modify the following line, [-2: ]
for action, observation in intermediate_steps[-2: ]:
thoughts += action.log
thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
return thoughts
class TaskCreationChain(LLMChain):
"""Chain to generates tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_creation_template = (
"You are an task creation AI that uses the result of an execution agent"
" to create new tasks with the following objective: {objective},"
" The last completed task has the result: {result}."
" This result was based on this task description: {task_description}."
" These are incomplete tasks: {incomplete_tasks}."
" Based on the result, create new tasks to be completed"
" by the AI system that do not overlap with incomplete tasks."
" Return the tasks as an array."
)
prompt = PromptTemplate(
template=task_creation_template,
input_variables=["result", "task_description", "incomplete_tasks", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
class TaskPrioritizationChain(LLMChain):
"""Chain to prioritize tasks."""
@classmethod
def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
"""Get the response parser."""
task_prioritization_template = (
"You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing"
" the following tasks: {task_names}."
" Consider the ultimate objective of your team: {objective}."
" Do not remove any tasks. Return the result as a numbered list, like:"
" #. First task"
" #. Second task"
" Start the task list with number {next_task_id}."
)
prompt = PromptTemplate(
template=task_prioritization_template,
input_variables=["task_names", "next_task_id", "objective"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose)
def get_next_task(task_creation_chain: LLMChain, result: Dict, task_description: str, task_list: List[str], objective: str) -> List[Dict]:
"""Get the next task."""
incomplete_tasks = ", ".join(task_list)
response = task_creation_chain.run(result=result, task_description=task_description, incomplete_tasks=incomplete_tasks, objective=objective)
new_tasks = response.split('\n')
return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
def prioritize_tasks(task_prioritization_chain: LLMChain, this_task_id: int, task_list: List[Dict], objective: str) -> List[Dict]:
"""Prioritize tasks."""
task_names = [t["task_name"] for t in task_list]
next_task_id = int(this_task_id) + 1
response = task_prioritization_chain.run(task_names=task_names, next_task_id=next_task_id, objective=objective)
new_tasks = response.split('\n')
prioritized_task_list = []
for task_string in new_tasks:
if not task_string.strip():
continue
task_parts = task_string.strip().split(".", 1)
if len(task_parts) == 2:
task_id = task_parts[0].strip()
task_name = task_parts[1].strip()
prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
return prioritized_task_list
def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
"""Get the top k tasks based on the query."""
results = vectorstore.similarity_search_with_score(query, k=k)
if not results:
return []
sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
return [str(item.metadata['task']) for item in sorted_results]
def execute_task(vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5) -> str:
"""Execute a task."""
context = _get_top_tasks(vectorstore, query=objective, k=k)
return execution_chain.run(objective=objective, context=context, task=task)
class BabyAGI(Chain, BaseModel):
"""Controller model for the BabyAGI agent."""
task_list: deque = Field(default_factory=deque)
task_creation_chain: TaskCreationChain = Field(...)
task_prioritization_chain: TaskPrioritizationChain = Field(...)
execution_chain: AgentExecutor = Field(...)
task_id_counter: int = Field(1)
vectorstore: VectorStore = Field(init=False)
max_iterations: Optional[int] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def add_task(self, task: Dict):
self.task_list.append(task)
def print_task_list(self):
print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
for t in self.task_list:
print(str(t["task_id"]) + ": " + t["task_name"])
def print_next_task(self, task: Dict):
print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
print(str(task["task_id"]) + ": " + task["task_name"])
def print_task_result(self, result: str):
print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
print(result)
@property
def input_keys(self) -> List[str]:
return ["objective"]
@property
def output_keys(self) -> List[str]:
return []
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Run the agent."""
objective = inputs['objective']
first_task = inputs.get("first_task", f"Make a todo list about this objective: {objective}")
self.add_task({"task_id": 1, "task_name": first_task})
num_iters = 0
while True:
if self.task_list:
self.print_task_list()
# Step 1: Pull the first task
task = self.task_list.popleft()
self.print_next_task(task)
# Step 2: Execute the task
result = execute_task(
self.vectorstore, self.execution_chain, objective, task["task_name"]
)
this_task_id = int(task["task_id"])
self.print_task_result(result)
# Step 3: Store the result in Pinecone
result_id = f"result_{task['task_id']}"
self.vectorstore.add_texts(
texts=[result],
metadatas=[{"task": task["task_name"]}],
ids=[result_id],
)
# Step 4: Create new tasks and reprioritize task list
new_tasks = get_next_task(
self.task_creation_chain, result, task["task_name"], [t["task_name"] for t in self.task_list], objective
)
for new_task in new_tasks:
self.task_id_counter += 1
new_task.update({"task_id": self.task_id_counter})
self.add_task(new_task)
self.task_list = deque(
prioritize_tasks(
self.task_prioritization_chain, this_task_id, list(self.task_list), objective
)
)
num_iters += 1
if self.max_iterations is not None and num_iters == self.max_iterations:
print("\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m")
break
return {}
@classmethod
def from_llm(
cls,
llm: BaseLLM,
prompt = None,
verbose: bool = False,
tools = None,
stream_output = None,
**kwargs
) -> "BabyAGI":
embeddings_model = OpenAIEmbeddings()
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
task_creation_chain = TaskCreationChain.from_llm(
llm, verbose=verbose
)
task_prioritization_chain = TaskPrioritizationChain.from_llm(
llm, verbose=verbose
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = ContextAwareAgent(llm_chain=llm_chain, allowed_tools=tool_names)
if stream_output:
agent_executor = Executor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
else:
agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
return cls(
task_creation_chain=task_creation_chain,
task_prioritization_chain=task_prioritization_chain,
execution_chain=agent_executor,
vectorstore=vectorstore,
**kwargs
)
if __name__ == "__main__":
todo_prompt = PromptTemplate.from_template("You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}")
todo_chain = LLMChain(llm=OpenAI(temperature=0), prompt=todo_prompt)
search = SerpAPIWrapper()
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
),
Tool(
name = "TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Please be very clear what the objective is!"
)
]
prefix = """You are an AI who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}."""
suffix = """Question: {task}
{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
tools,
prefix=prefix,
suffix=suffix,
input_variables=["objective", "task", "context","agent_scratchpad"]
)
OBJECTIVE = "Write a weather report for SF today"
llm = OpenAI(temperature=0)
# Logging of LLMChains
verbose=False
# If None, will keep on going forever
max_iterations: Optional[int] = 10
baby_agi = BabyAGI.from_llm(
llm=llm,
prompt=prompt,
tools=tools,
verbose=verbose,
max_iterations=max_iterations
)
baby_agi({"objective": OBJECTIVE})
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/BabyagiTools.py
|
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
import py3langid as langid
from iso639 import languages
from typing import Dict
from copy import deepcopy
import os
def detect_lang(text: str):
lang_code = langid.classify(text)[0]
lang_name = languages.get(part1=lang_code[:2]).name
return lang_name
class Translator:
def __init__(self,
openai_api_key: str = None,
model_name: str = "gpt-3.5-turbo"):
llm = self.create_openai_model(openai_api_key, model_name)
prompt = self.create_prompt()
self.chain = LLMChain(llm=llm, prompt=prompt)
def __call__(self, inputs: Dict[str, str]) -> Dict[str, str]:
question = inputs["input"]
answer = inputs["output"]
src_lang = detect_lang(answer)
tgt_lang = detect_lang(question)
if src_lang != tgt_lang:
translated_answer = self.chain.run(text=answer, language=tgt_lang)
outputs = deepcopy(inputs)
outputs["output"] = translated_answer
return outputs
else:
return inputs
def create_openai_model(self, openai_api_key: str, model_name: str) -> OpenAI:
if openai_api_key is None:
openai_api_key = os.environ.get('OPENAI_API_KEY')
llm = OpenAI(model_name=model_name,
temperature=0.0,
openai_api_key=openai_api_key)
return llm
def create_prompt(self) -> PromptTemplate:
template = """
Translate to {language}: {text} =>
"""
prompt = PromptTemplate(
input_variables=["text", "language"],
template=template
)
return prompt
if __name__ == "__main__":
lang = {
"zh": {
"question": "帮我介绍下《深海》这部电影",
"answer": "《深海》是一部中国大陆的动画、奇幻电影,由田晓鹏导演,苏鑫、王亭文、滕奎兴等人主演。剧情简介是在大海的最深处,藏着所有秘密。一位现代少女(参宿)误入梦幻的 深海世界,却因此邂逅了一段独特的生命旅程。",
},
"ja": {
"question": "映画「深海」について教えてください",
"answer": "「深海」は、中国本土のアニメーションおよびファンタジー映画で、Tian Xiaopeng が監督し、Su Xin、Wang Tingwen、Teng Kuixing などが出演しています。 あらすじは、海の最深部にはすべての秘密が隠されているというもの。 夢のような深海の世界に迷い込んだ現代少女(さんすけ)は、それをきっかけに独特の人生の旅に出くわす。 ",
},
"ko": {
"question": "영화 딥씨에 대해 알려주세요",
"answer": "\"Deep Sea\"는 Tian Xiaopeng 감독, Su Xin, Wang Tingwen, Teng Kuixing 등이 출연한 중국 본토의 애니메이션 및 판타지 영화입니다. 시놉시스는 바다 가장 깊은 곳에 모든 비밀이 숨겨져 있다는 것입니다. 현대 소녀(산스케)는 꿈 같은 심해 세계로 방황하지만 그것 때문에 독특한 삶의 여정을 만난다. ",
},
"en": {
"question": "Tell me about the movie '深海'",
"answer": "\"Deep Sea\" is an animation and fantasy film in mainland China, directed by Tian Xiaopeng, starring Su Xin, Wang Tingwen, Teng Kuixing and others. The synopsis is that in the deepest part of the sea, all secrets are hidden. A modern girl (Sansuke) strays into the dreamy deep sea world, but encounters a unique journey of life because of it. ",
},
"de": {
"question": "Erzähl mir von dem Film '深海'",
"answer": "\"Deep Sea\" ist ein Animations- und Fantasyfilm in Festlandchina unter der Regie von Tian Xiaopeng mit Su Xin, Wang Tingwen, Teng Kuixing und anderen in den Hauptrollen. Die Zusammenfassung ist, dass im tiefsten Teil des Meeres alle Geheimnisse verborgen sind. Ein modernes Mädchen (Sansuke) verirrt sich in die verträumte Tiefseewelt, trifft dabei aber auf eine einzigartige Lebensreise. ",
},
"fr": {
"question": "Parlez-moi du film 'Deep Sea'",
"answer": "\"Deep Sea\" est un film d'animation et fantastique en Chine continentale, réalisé par Tian Xiaopeng, avec Su Xin, Wang Tingwen, Teng Kuixing et d'autres. Le synopsis est que dans la partie la plus profonde de la mer, tous les secrets sont cachés. Une fille moderne (Sansuke) s'égare dans le monde onirique des profondeurs marines, mais rencontre un voyage de vie unique à cause de cela. ",
},
"ru": {
"question": "Расскажите о фильме 'Глубокое море'",
"answer": "«Глубокое море» — это анимационный и фэнтезийный фильм в материковом Китае, снятый Тянь Сяопином, в главных ролях Су Синь, Ван Тинвэнь, Тэн Куйсин и другие. Суть в том, что в самой глубокой части моря скрыты все секреты. Современная девушка (Сансукэ) заблудилась в мечтательном глубоководном мире, но из-за этого столкнулась с уникальным жизненным путешествием. ",
},
}
translator = Translator()
for source in lang:
for target in lang:
print(source, "=>", target, end=":\t")
question = lang[target]["question"]
answer = lang[source]["answer"]
inputs = {
"input": question,
"output": answer
}
result = translator(inputs)
translated_answer = result["output"]
if detect_lang(question) == detect_lang(translated_answer) == languages.get(part1=target).name:
print("Y")
else:
print("N")
print("====================")
print("Question:\t", detect_lang(question), " - ", question)
print("Answer:\t", detect_lang(answer), " - ", answer)
print("Translated Anser:\t", detect_lang(translated_answer), " - ", translated_answer)
print("====================")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/translator.py
|
"""Interface for tools."""
from inspect import signature
from typing import Any, Awaitable, Callable, Optional, Union
from langchain.agents import Tool as LangChainTool
from langchain.tools.base import BaseTool
import requests
import json
import http.client
http.client._MAXLINE = 655360
from bmtools import get_logger
logger = get_logger(__name__)
class Tool(LangChainTool):
tool_logo_md: str = ""
class RequestTool(BaseTool):
"""Tool that takes in function or coroutine directly."""
description: str = ""
func: Callable[[str], str]
coroutine: Optional[Callable[[str], Awaitable[str]]] = None
max_output_len = 4000
tool_logo_md: str = ""
def _run(self, tool_input: str) -> str:
"""Use the tool."""
return self.func(tool_input)
async def _arun(self, tool_input: str) -> str:
"""Use the tool asynchronously."""
if self.coroutine:
return await self.coroutine(tool_input)
raise NotImplementedError("Tool does not support async")
def convert_prompt(self,params):
lines = "Your input should be a json: {{"
for p in params:
logger.debug(p)
optional = not p['required']
description = p.get('description', '')
if len(description) > 0:
description = "("+description+")"
lines += '"{name}" : {type}{desc},'.format(
name=p['name'],
type= p['schema']['type'],
optional=optional,
desc=description)
lines += "}}"
return lines
def __init__(self, root_url, func_url, method, request_info, **kwargs):
""" Store the function, description, and tool_name in a class to store the information
"""
url = root_url + func_url
def func(json_args):
if isinstance(json_args, str):
# json_args = json_args.replace("\'", "\"")
# print(json_args)
try:
json_args = json.loads(json_args)
except json.JSONDecodeError:
return "Your input cannot be parsed as JSON, please reformulate it in your next Thought."
response = requests.get(url, json_args)
if response.status_code == 200:
message = response.text
else:
message = f"Error code {response.status_code}. You can try (1) Change your input (2) Call another function. (If the same error code is produced more than 4 times, please use Thought: I can not use these APIs, so I will stop. Final Answer: No Answer, please check the APIs.)"
message = message[:self.max_output_len] # TODO: not rigorous, to improve
return message
tool_name = func_url.replace("/", ".").strip(".")
if 'parameters' in request_info[method]:
str_doc = self.convert_prompt(request_info[method]['parameters'])
else:
str_doc = ''
description = f"- {tool_name}:\n" + \
request_info[method].get('summary', '').replace("{", "{{").replace("}", "}}") \
+ "," \
+ request_info[method].get('description','').replace("{", "{{").replace("}", "}}") \
+ str_doc \
+ f"The Action to trigger this API should be {tool_name}\n and the input parameters should be a json dict string. Pay attention to the type of parameters.\n"
logger.info("API Name: {}".format(tool_name))
logger.info("API Description: {}".format(description))
super(RequestTool, self).__init__(
name=tool_name, func=func, description=description, **kwargs
)
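To illustrate the prompt fragment that convert_prompt builds for the agent, a small worked example; the parameter spec below is a made-up OpenAPI-style sample, not from a real plugin.
# Worked example for convert_prompt above (made-up sample parameters).
sample_params = [
    {"name": "location", "required": True, "schema": {"type": "string"},
     "description": "city to query"},
    {"name": "days", "required": False, "schema": {"type": "integer"}},
]
# convert_prompt(sample_params) produces roughly:
#   Your input should be a json: {{"location" : string(city to query),"days" : integer,}}
# The doubled braces are kept so the text survives later prompt templating intact.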
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/apitool.py
|
from langchain.llms import OpenAI
from langchain import OpenAI, LLMChain
from langchain.agents import ZeroShotAgent, AgentExecutor
import importlib
import json
import os
import requests
import yaml
from bmtools.agent.apitool import Tool
from bmtools.agent.singletool import STQuestionAnswerer
from bmtools.agent.executor import Executor, AgentExecutorWithTranslation
from bmtools import get_logger
logger = get_logger(__name__)
def load_valid_tools(tools_mappings):
tools_to_config = {}
for key in tools_mappings:
get_url = tools_mappings[key]+".well-known/ai-plugin.json"
response = requests.get(get_url)
if response.status_code == 200:
tools_to_config[key] = response.json()
else:
logger.warning("Load tool {} error, status code {}".format(key, response.status_code))
return tools_to_config
class MTQuestionAnswerer:
"""Use multiple tools to answer a question. Basically pass a natural question to
"""
def __init__(self, openai_api_key, all_tools, stream_output=False, llm='ChatGPT'):
if len(openai_api_key) < 3: # not valid key (TODO: more rigorous checking)
openai_api_key = os.environ.get('OPENAI_API_KEY')
self.openai_api_key = openai_api_key
self.stream_output = stream_output
self.llm_model = llm
self.set_openai_api_key(openai_api_key)
self.load_tools(all_tools)
def set_openai_api_key(self, key):
logger.info("Using {}".format(self.llm_model))
if self.llm_model == "GPT-3.5":
self.llm = OpenAI(temperature=0.0, openai_api_key=key) # use text-davinci
elif self.llm_model == "ChatGPT":
self.llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.0, openai_api_key=key) # use chatgpt
else:
raise RuntimeError("Your model is not available.")
def load_tools(self, all_tools):
logger.info("All tools: {}".format(all_tools))
self.all_tools_map = {}
self.tools_pool = []
for name in all_tools:
meta_info = all_tools[name]
question_answer = STQuestionAnswerer(self.openai_api_key, stream_output=self.stream_output, llm=self.llm_model)
subagent = question_answer.load_tools(name, meta_info, prompt_type="react-with-tool-description", return_intermediate_steps=False)
tool_logo_md = f'<img src="{meta_info["logo_url"]}" width="32" height="32" style="display:inline-block">'
for tool in subagent.tools:
tool.tool_logo_md = tool_logo_md
tool = Tool(
name=meta_info['name_for_model'],
description=meta_info['description_for_model'].replace("{", "{{").replace("}", "}}"),
func=subagent,
)
tool.tool_logo_md = tool_logo_md
self.tools_pool.append(tool)
def build_runner(self, ):
# You can modify the prompt to make the model perform better, and you can also modify each tool's doc.
prefix = """Answer the following questions as best you can. In this level, you are calling the tools in natural language format, since the tools are actually an intelligent agent like you, but they expert only in one area. Several things to remember. (1) Remember to follow the format of passing natural language as the Action Input. (2) DO NOT use your imagination, only use concrete information given by the tools. (3) If the observation contains images or urls which has useful information, YOU MUST INCLUDE ALL USEFUL IMAGES and links in your Answer and Final Answers using format . BUT DO NOT provide any imaginary links. (4) The information in your Final Answer should include ALL the informations returned by the tools. (5) If a user's query is a language other than English, please translate it to English without tools, and translate it back to the source language in Final Answer. You have access to the following tools (Only use these tools we provide you):"""
suffix = """\nBegin! Remember to . \nQuestion: {input}\n{agent_scratchpad}"""
prompt = ZeroShotAgent.create_prompt(
self.tools_pool,
prefix=prefix,
suffix=suffix,
input_variables=["input", "agent_scratchpad"]
)
llm_chain = LLMChain(llm=self.llm, prompt=prompt)
logger.info("Full Prompt Template:\n {}".format(prompt.template))
tool_names = [tool.name for tool in self.tools_pool]
agent = ZeroShotAgent(llm_chain=llm_chain, allowed_tools=tool_names)
if self.stream_output:
agent_executor = Executor.from_agent_and_tools(agent=agent, tools=self.tools_pool, verbose=True, return_intermediate_steps=True)
else:
agent_executor = AgentExecutorWithTranslation.from_agent_and_tools(agent=agent, tools=self.tools_pool, verbose=True, return_intermediate_steps=True)
return agent_executor
if __name__ == "__main__":
tools_mappings = {
"klarna": "https://www.klarna.com/",
"chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
"wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
"weather": "http://127.0.0.1:8079/tools/weather/",
}
tools = load_valid_tools(tools_mappings)
qa = MTQuestionAnswerer(openai_api_key='', all_tools=tools)
agent = qa.build_runner()
agent("How many carbon elements are there in CH3COOH? How many people are there in China?")
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/tools_controller.py
|
import types
from typing import Any, Dict, List, Tuple, Union
from langchain.agents import AgentExecutor
from langchain.input import get_color_mapping
from langchain.schema import AgentAction, AgentFinish
from bmtools.agent.translator import Translator
class AgentExecutorWithTranslation(AgentExecutor):
translator: Translator = Translator()
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
try:
outputs = super().prep_outputs(inputs, outputs, return_only_outputs)
except ValueError as e:
return outputs
else:
if "input" in outputs:
outputs = self.translator(outputs)
return outputs
class Executor(AgentExecutorWithTranslation):
def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
"""Run text through and get agent response."""
# Do any preparation necessary when receiving a new input.
self.agent.prepare_for_new_call()
# Construct a mapping of tool name to tool for easy lookup
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name for tool in self.tools], excluded_colors=["green"]
)
intermediate_steps: List[Tuple[AgentAction, str]] = []
# Let's start tracking the iterations the agent has gone through
iterations = 0
# We now enter the agent loop (until it returns something).
while self._should_continue(iterations):
next_step_output = self._take_next_step(
name_to_tool_map, color_mapping, inputs, intermediate_steps
)
if isinstance(next_step_output, AgentFinish):
yield self._return(next_step_output, intermediate_steps)
return
agent_action = next_step_output[0]
tool_logo = None
for tool in self.tools:
if tool.name == agent_action.tool:
tool_logo = tool.tool_logo_md
if isinstance(next_step_output[1], types.GeneratorType):
logo = f"{tool_logo}" if tool_logo is not None else ""
yield (AgentAction("", agent_action.tool_input, agent_action.log), f"Further use other tool {logo} to answer the question.")
for output in next_step_output[1]:
yield output
next_step_output = (agent_action, output)
else:
for tool in self.tools:
if tool.name == agent_action.tool:
yield (AgentAction(tool_logo, agent_action.tool_input, agent_action.log), next_step_output[1])
intermediate_steps.append(next_step_output)
# See if tool should return directly
tool_return = self._get_tool_return(next_step_output)
if tool_return is not None:
yield self._return(tool_return, intermediate_steps)
return
iterations += 1
output = self.agent.return_stopped_response(
self.early_stopping_method, intermediate_steps, **inputs
)
yield self._return(output, intermediate_steps)
return
def __call__(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
for output in self._call(inputs):
if type(output) is dict:
output = self.prep_outputs(inputs, output, return_only_outputs)
yield output
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_chain_end(output, verbose=self.verbose)
# return self.prep_outputs(inputs, output, return_only_outputs)
return output
|
EXA-1-master
|
exa/libraries/BMTools/bmtools/agent/executor.py
|
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import os
import sys
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "FarmVibes.AI"
copyright = "2023, Microsoft"
author = "Microsoft"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinxcontrib.mermaid",
"myst_parser",
"sphinx_autodoc_typehints",
]
autosummary_generate = True
autodoc_member_order = "groupwise"
myst_heading_anchors = 3
typehints_use_rtype = False
typehints_defaults = "comma"
sys.path.insert(0, os.path.abspath("../../src"))
templates_path = ["_templates"]
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_rtd_theme"
html_static_path = ["_static"]
html_css_files = [
"custom.css",
]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
|
EXA-1-master
|
exa/libraries/farmvibes-ai-main/docs/source/conf.py
|
from typing import Any, Dict
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import torch
import torch.nn as nn
import torchmetrics
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
class SegmentationModel(pl.LightningModule):
def __init__(
self,
lr: float,
weight_decay: float,
in_channels: int,
encoder_name: str = "resnet34",
encoder_weights: str = "imagenet",
classes: int = 1,
num_epochs: int = 10,
):
"""Initialize a new Segmentation Model instance.
Args:
lr: learning rate.
weight_decay: amount of weight decay regularization.
in_channels: number of input channels of the network.
Needs to match the number of bands/channels of the stacked NDVI raster.
encoder_name: name of the encoder used for the FPN segmentation model.
See segmentation_models_pytorch for more information.
encoder_weights: name of the pretrained weights for the encoder.
Use 'imagenet' or None (random weights).
classes: number of output classes.
As we are doing a binary crop vs. non-crop segmentation, we use the default value.
num_epochs: number of training epochs. Used for the cosine annealing scheduler.
"""
super().__init__()
self.save_hyperparameters()
self.encoder_name = encoder_name
self.encoder_weights = encoder_weights
self.in_channels = in_channels
self.classes = classes
self.model = smp.FPN(
encoder_name=self.encoder_name,
encoder_weights=self.encoder_weights,
in_channels=in_channels,
classes=self.classes,
)
self.loss = nn.BCEWithLogitsLoss()
self.lr = lr
self.weight_decay = weight_decay
self.num_epochs = num_epochs
metrics = torchmetrics.MetricCollection(
{
"ap": torchmetrics.BinnedAveragePrecision(num_classes=1, thresholds=100),
"acc": torchmetrics.Accuracy(),
}
)
self.train_metrics = metrics.clone(prefix="train_")
self.val_metrics = metrics.clone(prefix="val_")
def forward(self, x: torch.Tensor):
return self.model(x)
def configure_optimizers(self):
optimizer = Adam(params=self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs, eta_min=0)
lr_scheduler = {
"scheduler": scheduler,
"name": "lr_scheduler",
}
return [optimizer], [lr_scheduler]
def _shared_step(self, batch: Dict[str, Any], batch_idx: int) -> Dict[str, Any]:
pred = self(batch["image"])
for t in pred, batch["mask"]:
assert torch.all(torch.isfinite(t))
loss = self.loss(pred, batch["mask"])
return {"loss": loss, "preds": pred.detach(), "target": batch["mask"]}
def _shared_step_end(
self, outputs: Dict[str, Any], metrics: torchmetrics.MetricCollection, prefix: str
) -> None:
m = metrics(outputs["preds"].sigmoid().flatten(), outputs["target"].flatten().to(torch.int))
self.log(f"{prefix}_loss", outputs["loss"])
self.log_dict(m)
def training_step(self, batch: Dict[str, Any], batch_idx: int) -> Dict[str, Any]:
return self._shared_step(batch, batch_idx)
def training_step_end(self, outputs: Dict[str, Any]) -> None:
self._shared_step_end(outputs, self.train_metrics, "train")
def validation_step(self, batch: Dict[str, Any], batch_idx: int) -> Dict[str, Any]:
return self._shared_step(batch, batch_idx)
def validation_step_end(self, outputs: Dict[str, Any]) -> None:
return self._shared_step_end(outputs, self.val_metrics, "val")
# Trace the model with sigmoid activation
class ModelPlusSigmoid(nn.Module):
def __init__(self, model: torch.nn.Module):
super().__init__()
self.model = model
def forward(self, x: torch.Tensor):
return self.model(x).sigmoid()
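The comment above ModelPlusSigmoid suggests tracing the trained network with the sigmoid baked in; a minimal sketch of how that export could look, where the checkpoint path, output file name, and input size are placeholders.
# Minimal tracing sketch (checkpoint path, output name, and input size are placeholders).
import torch

lightning_module = SegmentationModel.load_from_checkpoint("path/to/checkpoint.ckpt")
traceable = ModelPlusSigmoid(lightning_module.model).eval()

example_input = torch.randn(1, lightning_module.in_channels, 256, 256)
traced = torch.jit.trace(traceable, example_input)
traced.save("crop_segmentation_traced.pt")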
|
EXA-1-master
|
exa/libraries/farmvibes-ai-main/notebooks/crop_segmentation/notebook_lib/models.py
|