python_code (stringlengths 0–4.04M) | repo_name (stringlengths 7–58) | file_path (stringlengths 5–147) |
---|---|---|
"""Misc utils."""
import logging
from pathlib import Path
from typing import List
from rich.logging import RichHandler
def setup_logger(log_dir: str):
"""Create log directory and logger."""
Path(log_dir).mkdir(exist_ok=True, parents=True)
log_path = str(Path(log_dir) / "log.txt")
handlers = [logging.FileHandler(log_path), RichHandler(rich_tracebacks=True)]
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(module)s] [%(levelname)s] %(message)s",
handlers=handlers,
)
def compute_metrics(preds: List, golds: List, task: str):
"""Compute metrics."""
mets = {"tp": 0, "tn": 0, "fp": 0, "fn": 0, "crc": 0, "total": 0}
for pred, label in zip(preds, golds):
label = label.strip().lower()
pred = pred.strip().lower()
mets["total"] += 1
if task in {
"data_imputation",
"entity_matching",
}:
crc = pred == label
elif task in {"entity_matching", "schema_matching", "error_detection_spelling"}:
crc = pred.startswith(label)
elif task in {"error_detection"}:
pred = pred.split("\n\n")[-1]
crc = pred.endswith(label)
else:
raise ValueError(f"Unknown task: {task}")
# Measure exact-match accuracy for generation
if crc:
mets["crc"] += 1
if label == "yes":
if crc:
mets["tp"] += 1
else:
mets["fn"] += 1
elif label == "no":
if crc:
mets["tn"] += 1
else:
mets["fp"] += 1
prec = mets["tp"] / max(1, (mets["tp"] + mets["fp"]))
rec = mets["tp"] / max(1, (mets["tp"] + mets["fn"]))
acc = mets["crc"] / mets["total"]
f1 = 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0.0
return prec, rec, acc, f1
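# A minimal usage sketch (hypothetical inputs, not part of the original module)
# showing how the counts above map onto the returned metrics for a yes/no task.
if __name__ == "__main__":
    _preds = ["Yes", "No", "Yes"]
    _golds = ["Yes", "No", "No"]
    _prec, _rec, _acc, _f1 = compute_metrics(_preds, _golds, task="entity_matching")
    # tp=1, tn=1, fp=1, fn=0 -> prec=0.5, rec=1.0, acc~0.67, f1~0.67
    print(_prec, _rec, _acc, _f1)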
|
fm_data_tasks-main
|
fm_data_tasks/utils/utils.py
|
"""Prompt utils."""
import logging
from typing import Any, Tuple
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from fm_data_tasks.utils import constants
from fm_data_tasks.utils.data_utils import sample_train_data
logger = logging.getLogger(__name__)
def get_manual_prompt(data_dir: str, example: pd.Series) -> str:
"""Get manual prompt for data name."""
if data_dir not in constants.DATA2TASK.keys():
raise ValueError(f"{data_dir} not recognized for prompts")
subkey_attr = constants.DATA2EXAMPLE_SUBKEY_ATTR[data_dir]
if subkey_attr is None:
if not isinstance(constants.PREFIXES[data_dir], str):
raise ValueError(f"Prefix was not a string for {data_dir}")
return constants.PREFIXES[data_dir]
else:
if not isinstance(constants.PREFIXES[data_dir], dict):
raise ValueError(
f"Prefix was not a dict with {subkey_attr} subkeys for {data_dir}"
)
return constants.PREFIXES[data_dir][str(example[subkey_attr])]
def get_random_prompt(train_data: pd.DataFrame, num_examples: int = 10) -> str:
"""Get random examples for prompt from trian data."""
prefix_exs_rows = sample_train_data(train_data, num_examples)
serialized_prefixes = [
(txt + label).strip()
for txt, label in zip(prefix_exs_rows["text"], prefix_exs_rows["label_str"])
]
prefix_exs = "\n\n".join(serialized_prefixes) + "\n"
return prefix_exs
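# The returned prefix is just the serialized examples joined by blank lines,
# e.g. (illustrative only; the exact text depends on the dataset serialization):
#   "Product A: ... Product B: ... Are they the same? Yes
#
#    Product A: ... Product B: ... Are they the same? No
#   "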
def get_validation_prompt(
validation_path: str, num_examples: int = 10, task: str = "entity_matching"
) -> str:
"""Get prompt from validation errors."""
return get_validation_embs_prompts(
df=pd.read_feather(validation_path),
num_exs=num_examples,
model_name="sentence-transformers/sentence-t5-base",
task=task,
)
def setup_st_pipeline(model_name: str) -> SentenceTransformer:
"""Get Sentence Transformer pipeline."""
logger.info("Loading SentenceTransfomer pipeline")
pipeline = SentenceTransformer(model_name)
return pipeline
def extract_st_features(
errors: pd.DataFrame, model: SentenceTransformer, text_col: str = "text"
) -> np.ndarray:
"""Extract ST features."""
logger.info("Extracting SentenceTransfomer features")
feats = model.encode(errors[text_col].tolist())
return feats
def get_hard_samples(
df: pd.DataFrame, errors_index: pd.Index, embs: np.ndarray, num_clusters: int = 10
) -> Tuple[pd.DataFrame, Any]:
"""
Choose pairs of samples that are near each other in embedding space but have different labels.
At least one sample in each pair must be an error.
"""
from sklearn.metrics.pairwise import cosine_similarity
# Upper triangle of cosine similarity matrix
sim = np.triu(cosine_similarity(embs))
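# np.triu keeps only the upper triangle (and diagonal), so each unordered pair
# (i, j) is scored once; self-similarity on the diagonal is skipped in the loop below.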
top_indexes = []
for row_idx in range(sim.shape[0]):
sorted_idxs = np.argsort(sim[row_idx], axis=0)[::-1]
for idx in sorted_idxs[: num_clusters + 1]:
# Skip the row's similarity to itself
if idx == row_idx:
continue
top_indexes.append([[row_idx, idx], sim[row_idx, idx]])
# Get most similar pairs
top_indexes = sorted(top_indexes, key=lambda x: x[1], reverse=True)
df_indexes = []
for i in range(len(top_indexes)):
row_idx, col_idx = top_indexes[i][0]
if (row_idx in errors_index or col_idx in errors_index) and (
df.iloc[row_idx]["label_str"] != df.iloc[col_idx]["label_str"]
):
if row_idx not in df_indexes:
df_indexes.append(row_idx)
if col_idx not in df_indexes:
df_indexes.append(col_idx)
return df.iloc[df_indexes[:num_clusters]], None
def get_validation_embs_prompts(
df: pd.DataFrame,
num_exs: int = 10,
model_name: str = "sentence-transformers/sentence-t5-base",
task: str = "entity_matching",
) -> str:
"""
Generate prompt from cluster of data errors.
We use sentence embeddings to cluster each error example.
We then select `num_exs` examples from each cluster.
If all examples are of one class, we randomly swap one
example for an instance from the validation data with the missing
class.
"""
errors = df[
df["label_str"].str.lower().str.strip() != df["preds"].str.lower().str.strip()
]
pipeline = setup_st_pipeline(model_name)
embs = extract_st_features(df, pipeline)
samples, _ = get_hard_samples(df, errors.index, embs, num_clusters=num_exs)
if task in {"entity_matching", "error_detection"}:
# Add missing class if all examples are of one Yes/No class
# (We do not do this for imputation, only Yes/No)
missing_class = None
if len(samples[samples["label_str"].str.strip() == "Yes\n"]) == len(samples):
missing_class = "No\n"
elif len(samples[samples["label_str"].str.strip() == "No\n"]) == len(samples):
missing_class = "Yes\n"
if missing_class is not None:
pre_len = len(samples)
drop_indices = np.random.choice(samples.index, 1, replace=False)
samples = samples.drop(drop_indices)
samples = samples.append(
df[df["label_str"].str.strip() == missing_class].sample(1)
)
assert len(samples) == pre_len
logger.info(f"Number of samples: {len(samples)}")
new_prompt = (
"\n\n".join(
[
(txt + label).strip()
for txt, label in zip(samples["text"], samples["label_str"])
]
)
+ "\n"
)
return new_prompt
|
fm_data_tasks-main
|
fm_data_tasks/utils/prompt_utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import shutil
import subprocess
import sys
from distutils.util import convert_path
from shutil import rmtree
from setuptools import Command, find_packages, setup
main_ns = {}
ver_path = convert_path("meerkat/version.py")
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
# Restrict ipython version based on the python version.
# https://github.com/ipython/ipython/issues/14053
# TODO: Remove this once the wheel is pushed up.
if sys.version_info[0] == 3 and sys.version_info[1] <= 8:
ipython_requirement = "IPython<8.13.0"
else:
# python >= 3.9
ipython_requirement = "IPython"
# Package meta-data.
NAME = "meerkat-ml"
DESCRIPTION = (
"Meerkat is building new data abstractions to make " "machine learning easier."
)
URL = "https://github.com/robustness-gym/meerkat"
EMAIL = "kgoel@cs.stanford.edu"
AUTHOR = "The Meerkat Team"
REQUIRES_PYTHON = ">=3.7.0"
VERSION = main_ns["__version__"]
# What packages are required for this module to be executed?
REQUIRED = [
"dataclasses>=0.6",
"pandas",
"dill>=0.3.3",
"numpy>=1.18.0",
"cytoolz",
"ujson",
"torch>=1.7.0",
"scikit-learn",
"tqdm>=4.49.0",
"datasets>=1.4.1",
"pyarrow>=11.0.0",
"PyYAML>=5.4.1",
"omegaconf>=2.0.5",
"semver>=2.13.0",
"multiprocess>=0.70.11",
"Cython>=0.29.21",
"progressbar>=2.5",
"fvcore",
"ipywidgets>=7.0.0",
ipython_requirement,
"fastapi",
"uvicorn",
"rich",
"cryptography",
"fastapi",
"wrapt",
"typer",
"jinja2",
"nbformat",
"sse-starlette",
"tabulate",
"pyparsing",
]
# Read in docs/requirements.txt
with open("docs/requirements.txt") as f:
DOCS_REQUIREMENTS = f.read().splitlines()
# What packages are optional?
EXTRAS = {
"dev": [
"black==22.12.0",
"isort>=5.12.0",
"flake8>=3.8.4",
"docformatter>=1.4",
"pytest-cov>=2.10.1",
"recommonmark>=0.7.1",
"parameterized",
"pre-commit>=2.9.3",
"twine",
"httpx",
"ray",
]
+ DOCS_REQUIREMENTS,
"embeddings-mac": [
"faiss-cpu",
"umap-learn[plot]",
],
"embeddings-linux": [
"faiss-gpu",
"umap-learn[plot]",
],
"text": [
"transformers",
"spacy>=3.0.0",
],
"vision": ["torchvision>=0.9.0", "opencv-python", "Pillow"],
"audio": ["torchaudio"],
"medimg": [
"pyvoxel",
"kaggle",
"google-cloud-storage",
"google-cloud-bigquery[bqstorage,pandas]",
],
}
EXTRAS["all"] = list(set(sum(EXTRAS.values(), [])))
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
with open(os.path.join(here, project_slug, "__version__.py")) as f:
exec(f.read(), about)
else:
about["__version__"] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = "Build and publish the package."
user_options = [("skip-upload", "u", "skip git tagging and pypi upload")]
boolean_options = ["skip-upload"]
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
self.skip_upload = False
def finalize_options(self):
self.skip_upload = bool(self.skip_upload)
def run(self):
from huggingface_hub.repository import Repository
try:
self.status("Removing previous builds…")
rmtree(os.path.join(here, "dist"))
rmtree(os.path.join(here, "build"))
except OSError:
pass
# Build static components
self.status("Building static components…")
env = os.environ.copy()
env.update({"VITE_API_URL_PLACEHOLDER": "http://meerkat.dummy"})
if os.path.exists("./meerkat/interactive/app/build"):
shutil.rmtree("./meerkat/interactive/app/build")
build_process = subprocess.run(
"npm run build",
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd="./meerkat/interactive/app",
)
if build_process.returncode != 0:
print(build_process.stdout.decode("utf-8"))
sys.exit(1)
# Package static components to a tar file and push to huggingface.
# This requires having write access to meerkat-ml.
# TODO: Consider making this a github action.
self.status("Packaging static component build...")
components_build_targz = shutil.make_archive(
base_name=f"static-build-{VERSION}",
format="gztar",
root_dir="./meerkat/interactive/app/build",
)
self.status("Uploading static build to huggingface...")
local_repo_dir = os.path.abspath(
os.path.expanduser("~/.meerkat/hf/component-static-builds")
)
repo = Repository(
local_dir=local_repo_dir,
clone_from="meerkat-ml/component-static-builds",
repo_type="dataset",
token=os.environ.get("HF_TOKEN", True),
)
shutil.move(
components_build_targz,
os.path.join(local_repo_dir, os.path.basename(components_build_targz)),
)
repo.git_pull()
repo.push_to_hub(commit_message=f"{VERSION}: new component builds")
# Build the source and wheel.
self.status("Building Source and Wheel (universal) distribution…")
os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
if self.skip_upload:
self.status("Skipping git tagging and pypi upload")
sys.exit()
self.status("Uploading the package to PyPI via Twine…")
os.system("twine upload dist/*")
self.status("Pushing git tags…")
os.system("git tag v{0}".format(about["__version__"]))
os.system("git push --tags")
sys.exit()
class BumpVersionCommand(Command):
"""
To use: python setup.py bumpversion -v <version>
This command will push the new version directly and tag it.
"""
description = "Installs the foo."
user_options = [
("version=", "v", "the new version number"),
]
@staticmethod
def status(s):
"""Prints things in bold."""
print("\033[1m{0}\033[0m".format(s))
def initialize_options(self):
self.version = None
self.base_branch = None
self.version_branch = None
self.updated_files = [
"meerkat/version.py",
"meerkat/interactive/app/package.json",
"meerkat/interactive/app/package-lock.json",
]
def finalize_options(self):
# This package cannot be imported at top level because it
# is not recognized by Github Actions.
from packaging import version
if self.version is None:
raise ValueError("Please specify a version number.")
current_version = VERSION
if not version.Version(self.version) > version.Version(current_version):
raise ValueError(
f"New version ({self.version}) must be greater than "
f"current version ({current_version})."
)
def _undo(self):
os.system(f"git restore --staged {' '.join(self.updated_files)}")
os.system(f"git checkout -- {' '.join(self.updated_files)}")
# Return to the original branch
os.system(f"git checkout {self.base_branch}")
os.system(f"git branch -D {self.version_branch}")
def run(self):
self.status("Checking current branch is 'main'")
self.base_branch = current_branch = get_git_branch()
if current_branch != "main":
raise RuntimeError(
"You can only bump the version from the 'main' branch. "
"You are currently on the '{}' branch.".format(current_branch)
)
self.status("Pulling latest changes from origin")
err_code = os.system("git pull")
if err_code != 0:
raise RuntimeError("Failed to pull from origin/main.")
self.status("Checking working directory is clean")
err_code = os.system("git diff --exit-code")
err_code += os.system("git diff --cached --exit-code")
if err_code != 0:
raise RuntimeError("Working directory is not clean.")
self.version_branch = f"bumpversion/v{self.version}"
self.status(f"Create branch '{self.version_branch}'")
err_code = os.system(f"git checkout -b {self.version_branch}")
if err_code != 0:
raise RuntimeError("Failed to create branch.")
# Change the version in meerkat/version.py
self.status(f"Updating version {VERSION} -> {self.version}")
update_version(self.version)
# TODO: Add a check to make sure the version actually updated.
# if VERSION != self.version:
# self._undo()
# raise RuntimeError("Failed to update version.")
self.status(f"npm install to bump package-lock.json")
err_code = os.system("mk install")
if err_code != 0:
self._undo()
raise RuntimeError("Failed to update package-lock.json.")
self.status(f"Adding {', '.join(self.updated_files)} to git")
err_code = os.system(f"git add {' '.join(self.updated_files)}")
if err_code != 0:
self._undo()
raise RuntimeError("Failed to add file to git.")
self.status(f"Commit with message '[bumpversion] v{self.version}'")
err_code = os.system("git commit -m '[bumpversion] v{}'".format(self.version))
if err_code != 0:
self._undo()
raise RuntimeError("Failed to commit file to git.")
# Push the commit to origin.
self.status(f"Pushing commit to origin/{self.version_branch}")
err_code = os.system(
f"git push --force --set-upstream origin {self.version_branch}"
)
if err_code != 0:
# TODO: undo the commit automatically.
self._undo()
raise RuntimeError("Failed to push commit to origin.")
os.system(f"git checkout {self.base_branch}")
os.system(f"git branch -D {self.version_branch}")
sys.exit()
def update_version(version):
import json
# Update python.
ver_path = convert_path("meerkat/version.py")
init_py = [
line if not line.startswith("__version__") else f'__version__ = "{version}"\n'
for line in open(ver_path, "r").readlines()
]
with open(ver_path, "w") as f:
f.writelines(init_py)
# Update npm.
ver_path = convert_path("meerkat/interactive/app/package.json")
with open(ver_path, "r") as f:
package_json = json.load(f)
package_json["version"] = version
with open(ver_path, "w") as f:
json.dump(package_json, f, indent=4)
f.write("\n")
def get_git_branch():
"""Return the name of the current branch."""
proc = subprocess.Popen(["git branch"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
if err is not None:
raise RuntimeError(f"Error finding git branch: {err}")
out = out.decode("utf-8").split("\n")
current_branch = [line for line in out if line.startswith("*")][0]
current_branch = current_branch.replace("*", "").strip()
return current_branch
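# (Equivalently, `git rev-parse --abbrev-ref HEAD` prints just the current
# branch name without needing to parse `git branch` output.)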
# Where the magic happens:
setup(
name=NAME,
version=about["__version__"],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*", "demo"])
+ ["meerkat-demo"],
package_dir={"meerkat-demo": "demo"},
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={
"console_scripts": ["mk=meerkat.cli.main:cli"],
},
install_requires=REQUIRED,
extras_require=EXTRAS,
include_package_data=True,
license="Apache 2.0",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
# $ setup.py publish support.
cmdclass={"upload": UploadCommand, "bumpversion": BumpVersionCommand},
)
|
meerkat-main
|
setup.py
|
"""Multiple galleries in tabs, with independent sorting."""
import meerkat as mk
df1 = mk.get("imagenette", version="160px")
df2 = mk.get("imagenette", version="160px")
sort1 = mk.gui.Sort(df1, title="Sort (Dataframe 1)")
sort2 = mk.gui.Sort(df2, title="Sort (Dataframe 2)")
galleries = mk.gui.Tabs(
tabs={
"df1": mk.gui.Gallery(sort1(df1), main_column="img"),
"df2": mk.gui.Gallery(sort2(df2), main_column="img"),
}
)
page = mk.gui.Page(
mk.gui.html.flexcol(slots=[sort1, sort2, galleries]),
id="multiple-sort",
)
page.launch()
|
meerkat-main
|
demo/multiple-sort.py
|
"""A head-to-head reader study.
The reader inspects all variants of an image and selects the best one.
"""
import numpy as np
from PIL import Image
import meerkat as mk
from meerkat.interactive import html
def add_noise(img: Image.Image) -> Image.Image:
"""Simulate a noisy image."""
img = np.asarray(img).copy()
img += np.round(np.random.randn(*img.shape) * 10).astype(np.uint8)
img = np.clip(img, 0, 255)
return Image.fromarray(img)
@mk.endpoint()
def on_label(index, df: mk.DataFrame):
"""Add a label to the dataframe."""
df["label"][row] = index
row.set(row + 1)
@mk.endpoint()
def go_back(row: mk.Store, df: mk.DataFrame):
row.set(max(0, row - 1))
@mk.endpoint()
def go_forward(row: mk.Store, df: mk.DataFrame):
row.set(min(row + 1, len(df) - 1))
@mk.reactive()
def get_selected(label, value_list):
# Label needs to be converted to a string because the values
# are auto converted to strings by the RadioGroup component.
# TODO: Fix this so that value list remains a list of ints.
label = str(label)
selected = value_list.index(label) if label in value_list else None
return selected
@mk.reactive()
def select_row(df, row):
# We have to do this because range indexing doesn't work with
# stores.
return df[row : row + 1]
df = mk.get("imagenette", version="160px")
df["noisy_img"] = mk.defer(df["img"], add_noise)
img_columns = ["img", "noisy_img"]
df["noisy_img"].formatters = df["img"].formatters
# Shuffle the dataset.
df = df.shuffle(seed=20)
# Randomize which images are shown on each gallery pane.
state = np.random.RandomState(20)
df["index"] = np.asarray(
[state.permutation(np.asarray(img_columns)) for _ in range(len(df))]
)
df["img1"] = mk.defer(df, lambda df: df[df["index"][0]])
df["img2"] = mk.defer(df, lambda df: df[df["index"][1]])
df["img1"].formatters = df["img"].formatters
df["img2"].formatters = df["img"].formatters
anonymized_img_columns = ["img1", "img2"]
# Initialize labels to empty strings.
df["label"] = np.full(len(df), "")
df = df.mark()
row = mk.Store(0)
label = df[row]["label"]  # TODO: figure out why df["label"][row] doesn't work
cell_size = mk.Store(24)
back = mk.gui.core.Button(
title="<",
on_click=go_back.partial(row=row, df=df),
classes="bg-slate-100 py-3 px-6 rounded-lg drop-shadow-md w-fit hover:bg-slate-200", # noqa: E501
)
forward = mk.gui.core.Button(
title=">",
on_click=go_forward.partial(row=row, df=df),
classes="bg-slate-100 py-3 px-6 rounded-lg drop-shadow-md w-fit hover:bg-slate-200", # noqa: E501
)
label_buttons = [
mk.gui.core.Button(
title=f"{label}",
on_click=on_label.partial(index=label, df=df),
classes="bg-slate-100 py-3 px-6 rounded-lg drop-shadow-md w-fit hover:bg-slate-200", # noqa: E501
)
for label in anonymized_img_columns
]
# We need to explicitly add the markdown hashes.
label_display = mk.gui.core.markdown.Markdown(body="## Label: " + label)
display_df = select_row(df, row)
galleries = [
mk.gui.core.Gallery(display_df, main_column=main_column, cell_size=cell_size)
for main_column in anonymized_img_columns
]
page = mk.gui.Page(
html.flexcol(
[
html.gridcols2(galleries, classes="gap-4"),
html.div(label_display),
html.gridcols3(
[
back,
html.gridcols2(label_buttons, classes="gap-x-2"),
forward,
],
classes="gap-4 self-center justify-self-center w-full items-center",
),
]
),
id="reader-study",
)
page.launch()
|
meerkat-main
|
demo/reader-study.py
|
"""Display a data frame in an interactive table and gallery in a tabbed
view."""
import meerkat as mk
# Load in the imagenette dataset
df = mk.get("imagenette", version="160px")
# Create a Table and Gallery view of the data
table = mk.gui.Table(df)
gallery = mk.gui.Gallery(df, main_column="img")
# Put the two views in a Tabs component
tabs = mk.gui.Tabs(tabs={"Table": table, "Gallery": gallery})
# Show the page!
page = mk.gui.Page(
mk.gui.html.flexcol([tabs], classes="h-full"),
id="display",
)
page.launch()
|
meerkat-main
|
demo/tabbed-views.py
|
"""Display images in an interactive gallery.
This is a tutorial on how to build a basic interactive application with
Meerkat.
"""
import meerkat as mk
df = mk.get("imagenette", version="160px")
gallery = mk.gui.Gallery(df, main_column="img", tag_columns=["path", "label"])
page = mk.gui.Page(gallery, id="gallery")
page.launch()
|
meerkat-main
|
demo/tutorial-image-gallery.py
|
"""Sort an image dataset in a gallery."""
import meerkat as mk
df = mk.get("imagenette", version="160px")
df = df.mark()
sort = mk.gui.Sort(df)
# Show the sorted DataFrame in a gallery.
gallery = mk.gui.Gallery(sort(df), main_column="img")
page = mk.gui.Page(
mk.gui.html.div([sort, gallery], classes="flex flex-col"),
id="sort",
)
page.launch()
|
meerkat-main
|
demo/sort.py
|
from PIL import Image
import meerkat as mk
# Load the dataset
df = mk.get(
"poloclub/diffusiondb",
version="large_random_1k",
registry="huggingface",
)["train"]
for col in df.columns:
df[col] = df[col].to_numpy()
df["image_path"] = df["image"].defer(lambda x: x["path"])
df["image"] = (
df["image_path"]
.map(lambda x: Image.open(x))
.format(mk.format.ImageFormatterGroup())
)
# Add a filtering component
df = df.mark()
filter = mk.gui.Filter(df)
df_filtered = filter(df)
df_grouped = df_filtered.groupby("cfg").count()
df_grouped = df_grouped.rename({"height": "count"})
# Visualize the images in a gallery
gallery = mk.gui.Gallery(df_filtered, main_column="image")
# Add a plot component
plot = mk.gui.plotly.BarPlot(df_grouped, x="cfg", y="count")
table = mk.gui.Table(df_grouped)
component = mk.gui.html.gridcols2(
[
mk.gui.html.flexcol(
[
mk.gui.core.Header("DiffusionDB"),
filter,
gallery,
],
),
mk.gui.html.flexcol(
[
mk.gui.core.Header("Distribution"),
plot,
table,
],
classes="justify-center",
),
],
classes="gap-4",
)
# Make a page and launch
page = mk.gui.Page(component, id="diffusiondb")
page.launch()
|
meerkat-main
|
demo/diffusiondb.py
|
from datetime import datetime
from typing import List
import meerkat as mk
from meerkat.interactive import html
from meerkat.interactive.app.src.lib.component.core.filter import FilterCriterion
# Data loading.
imagenette = mk.get("imagenette", version="160px")
imagenette = imagenette[["img_id", "path", "img", "label", "index"]]
df = mk.DataFrame.read(
"https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/imagenette-160px-facebook-convnext-tiny-224.mk.tar.gz", # noqa: E501
overwrite=False,
)
# df = mk.DataFrame.read("~/Downloads/imagenette-remapped.mk")
df = imagenette.merge(df[["img_id", "logits", "pred"]], on="img_id")
df["logits"] = df["logits"].data
# Download precomputed CLIP embeddings for imagenette.
df_clip = mk.DataFrame.read(
"https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/embeddings/imagenette_160px.mk.tar.gz", # noqa: E501
overwrite=False,
)
df_clip = df_clip[["img_id", "img_clip"]]
df = df.merge(df_clip, on="img_id")
df["correct"] = df.map(lambda label, pred: label in pred, batch_size=len(df), pbar=True)
df.create_primary_key(column="pkey")
IMAGE_COLUMN = "img"
EMBED_COLUMN = "img_clip"
df.mark()
@mk.endpoint()
def add_match_criterion_to_sort(criterion, sort_criteria: mk.Store[List]):
# Keep a reference to the criteria Store so we can call .set() on it below.
criteria = sort_criteria
if criterion.name is None:
return
sort_criteria = [
criterion for criterion in sort_criteria if criterion.source != "match"
]
sort_criteria.insert(
0, mk.gui.Sort.create_criterion(criterion.name, ascending=False, source="match")
)
criteria.set(sort_criteria)
sort = mk.gui.Sort(df)
match = mk.gui.Match(
df,
against=EMBED_COLUMN,
on_match=add_match_criterion_to_sort.partial(sort_criteria=sort.criteria),
)
filter = mk.gui.Filter(df)
df = filter(df)
df_sorted = sort(df)
gallery = mk.gui.Gallery(df_sorted, main_column=IMAGE_COLUMN)
@mk.reactive()
def get_options(df):
return [
c
for c in df.columns
if "match" in c or c == "label" or c == "pred" or c == "correct"
]
select_x = mk.gui.core.Select(values=get_options(df), value="label")
select_y = mk.gui.core.Select(values=get_options(df), value="pred")
select_hue = mk.gui.core.Select(values=get_options(df), value="correct")
select_container = html.div(
[
html.div("x", classes="self-center font-mono"),
select_x,
html.div("y", classes="self-center font-mono"),
select_y,
html.div("hue", classes="self-center font-mono"),
select_hue,
],
classes="w-full flex flex-row justify-center gap-4",
)
@mk.endpoint()
def set_filter_with_plot_selection(criteria: mk.Store, selected: List[str]):
# Find any existing criterion for the primary key.
pkey_filter_exists = False
store = criteria
# `criterion` can sometimes turn into a `dict` from a `FilterCriterion`
# object. This is a bug that we need to fix.
criteria = [
FilterCriterion(**criterion) if isinstance(criterion, dict) else criterion
for criterion in criteria
]
for criterion in criteria:
if criterion.column == "pkey":
criterion.value = selected
criterion.is_enabled = True
pkey_filter_exists = True
store.set(criteria)
break
if not pkey_filter_exists and selected:
criteria.append(
FilterCriterion(
is_enabled=True,
column="pkey",
op="in",
value=selected,
)
)
store.set(criteria)
plot = mk.gui.plotly.ScatterPlot(
df,
x=select_x.value,
y=select_y.value,
hue=select_hue.value,
title="Scatter Plot",
on_select=set_filter_with_plot_selection.partial(criteria=filter.criteria),
)
# ================ Notes =================
def capture_state():
"""Capture the state of the application at the time the."""
return {
"filter": filter.criteria.value,
"sort": sort.criteria.value,
"match": match.criterion.value,
"select_x": select_x.value.value,
"select_y": select_y.value.value,
"select_hue": select_hue.value.value,
}
def set_state(state=None):
if state:
filter.criteria.set(state["filter"])
sort.criteria.set(state["sort"])
match.criterion.set(state["match"])
select_x.value.set(state["select_x"])
select_y.value.set(state["select_y"])
select_hue.value.set(state["select_hue"])
out = (
filter.criteria,
sort.criteria,
match.criterion,
select_x.value,
select_y.value,
select_hue.value,
)
assert all(isinstance(o, mk.Store) for o in out)
return out
@mk.endpoint()
def restore_state(notes: mk.DataFrame, selected: List[int]):
print(selected, type(selected))
if not selected:
return
if len(selected) > 1:
raise ValueError("Can only select one row at a time.")
state = notes.loc[selected[0]]["state"]
set_state(state)
@mk.endpoint()
def add_note(df: mk.DataFrame, notepad_text: mk.Store[str], text):
new_df = mk.DataFrame(
{"time": [str(datetime.now())], "notes": [text], "state": [capture_state()]}
)
if len(df) > 0:
new_df = new_df.append(df)
# Clear the text box.
notepad_text.set("")
df.set(new_df)
# Clear the selection
# selected.set([])
notes = mk.DataFrame(
{
"time": [str(datetime.now())],
"notes": ["An example note."],
"state": [capture_state()],
}
).mark()
notepad_text = mk.Store("")
selected = mk.Store([])
notepad = mk.gui.Textbox(
text=notepad_text,
placeholder="Add your observations...",
classes="w-full h-10 px-3 rounded-md shadow-md my-1 border-gray-400",
on_keyenter=add_note.partial(df=notes, notepad_text=notepad_text),
)
notes_table = mk.gui.Table(
notes[["time", "notes"]],
selected=selected,
single_select=True,
on_select=restore_state.partial(notes=notes),
classes="h-full pad-x-5 pl-2",
)
# ================ Display =================
component = html.div(
[
html.div(
[
match,
filter,
sort,
select_container,
plot,
html.div(
[
html.div(
"Notes",
classes="font-bold text-md text-slate-600 self-start pl-1",
),
html.div(notepad, classes="px-1"),
notes_table,
],
classes="bg-slate-100 px-1 py-2 gap-y-4 rounded-lg w-full h-fit",
),
],
classes="grid grid-rows-[auto_auto_auto_auto_5fr_3fr] h-full",
),
gallery,
],
# Make a grid with two equal sized columns.
classes="h-screen grid grid-cols-2 gap-4",
)
page = mk.gui.Page(component, id="error-analysis", progress=False)
page.launch()
|
meerkat-main
|
demo/error-analysis.py
|
import meerkat as mk
@mk.reactive()
def square(a: float) -> float:
return a**2
@mk.reactive()
def multiply(coef: float, a: float) -> float:
return coef * a
@mk.endpoint()
def increment(value: mk.Store):
value.set(value + 1)
input_slider = mk.gui.Slider(value=2.0)
coef_slider = mk.gui.Slider(value=2.0)
squared_value = square(input_slider.value)
result = multiply(coef_slider.value, squared_value)
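# Because `square` and `multiply` are reactive, `squared_value` and `result`
# are Stores that recompute automatically whenever either slider value changes
# (e.g. when the button below increments `input_slider.value`).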
button = mk.gui.Button(
title="Increment",
on_click=increment.partial(value=input_slider.value),
)
page = mk.gui.Page(
component=mk.gui.html.div(
[
input_slider,
coef_slider,
button,
mk.gui.Text(result),
],
classes="flex flex-col gap-4 items-center",
),
id="quickstart",
)
page.launch()
|
meerkat-main
|
demo/quickstart-interactive.py
|
"""A reactive image viewer that allows you to select a class and see 16 random
images from that class.
This is a tutorial on how to use `reactive` functions in Meerkat, to
build complex reactive workflows.
"""
import meerkat as mk
df = mk.get("imagenette", version="160px")
IMAGE_COL = "img"
LABEL_COL = "label"
@mk.reactive()
def random_images(df: mk.DataFrame):
images = df.sample(16)[IMAGE_COL]
formatter = images.formatters["base"]
# formatter = images.formatters['tiny']
return [formatter.encode(img) for img in images]
labels = list(df[LABEL_COL].unique())
class_selector = mk.gui.Select(
values=list(labels),
value=labels[0],
)
# Note that neither of these will work:
# filtered_df = df[df[LABEL_COL] == class_selector.value]
# (doesn't react to changes in class_selector.value)
# filtered_df = mk.reactive(lambda df: df[df[LABEL_COL] == class_selector.value])(df)
# (doesn't react to changes in class_selector.value)
filtered_df = mk.reactive(lambda df, label: df[df[LABEL_COL] == label])(
df, class_selector.value
)
images = random_images(filtered_df)
# This won't work if random_images were a simpler reactive fn that only did
# df.sample, because the encoding needs to happen inside the reactive fn.
# grid = mk.gui.html.gridcols2([
# mk.gui.Image(data=images.formatters["base"].encode(img)) for img in images
# ])
grid = mk.gui.html.div(
[
# Make the image square
mk.gui.html.div(mk.gui.Image(data=img))
for img in images
],
classes="h-fit grid grid-cols-4 gap-1",
)
layout = mk.gui.html.div(
[
mk.gui.html.div(
[mk.gui.Caption("Choose a class:"), class_selector],
classes="flex justify-center items-center mb-2 gap-2",
),
grid,
],
classes="h-full flex flex-col m-2",
)
page = mk.gui.Page(layout, id="reactive-viewer")
page.launch()
|
meerkat-main
|
demo/tutorial-reactive-viewer.py
|
import os
from gpt_index import GPTSimpleVectorIndex, SimpleDirectoryReader
import meerkat as mk
from meerkat.interactive import (
Button,
Caption,
Header,
Page,
Store,
Subheader,
Textbox,
endpoint,
html,
print,
reactive,
)
# Decorate with @reactive so that this fn is reactive.
# When its inputs change, it will be re-run.
@reactive
def load_index(dir: str, savefilename: str) -> GPTSimpleVectorIndex:
"""Function that loads the GPTSimpleVectorIndex from disk if it exists.
Otherwise it builds the index and saves it to disk.
Args:
dir (str): The directory where the index is saved.
savefilename (str): The name of the file where the index is saved.
Returns:
GPTSimpleVectorIndex: The index.
"""
if not dir or not os.path.exists(dir):
return None
if os.path.exists(os.path.join(dir, f"{savefilename}.json")):
# Load the previously saved index from disk.
return GPTSimpleVectorIndex.load_from_disk(
os.path.join(dir, f"{savefilename}.json")
)
# reader for docs
documents = SimpleDirectoryReader(dir).load_data()
# build the index
index = GPTSimpleVectorIndex(documents)
# Save the index to disk so it can be reloaded next time.
index.save_to_disk(os.path.join(dir, f"{savefilename}.json"))
return index
# Start off with a default directory.
# Make `dir` a `Store` to create a variable that can be used in reactive functions.
# A `Store` is a thin wrapper that behaves like the object it wraps.
dir = mk.Store("/Users/krandiash/Desktop/workspace/projects/gpt_index_data/")
# Create the index.
# Pass `dir` directly as if it were a `str`.
index = load_index(dir=dir, savefilename="gpt_index")
# Create a variable that will be used to store the response from the index
# for the last query. We will display this in the UI.
last_response = mk.Store("The response will appear here.")
@endpoint()
def query_gpt_index(
index: GPTSimpleVectorIndex,
query: str,
last_response: Store,
):
"""Given an index and query, return a response from the index.
Args:
index (GPTSimpleVectorIndex): The index.
query (str): The query.
last_response (Store): The store that will hold the last response.
"""
if not index:
last_response.set(
"Index not created. Please use the picker to choose a folder."
)
return
response = index.query(query)
# Stores can only be set inside endpoints in order to trigger the reactive
# functions that depend on them! The special `set` method helps with this.
last_response.set(response.response)
return response
# Create a Textbox for the user to provide a query.
query_component = Textbox()
# Create a Textbox for the user to provide a directory.
dir_component = Textbox(dir)
# Create a button that will call the `query_gpt_index` endpoint when clicked.
button = Button(
title="Query GPT-Index",
on_click=query_gpt_index.partial(
index=index,
query=query_component.text,
last_response=last_response,
),
)
# Display the response from the index.
# Use HTML to display the response from the index nicely.
text = html.div(
html.p(last_response, classes="font-mono whitespace-pre-wrap"),
classes="flex flex-col items-center justify-center h-full mt-4 bg-violet-200 rounded-sm p-2", # noqa: E501
)
# Print the values of the variables to the console, so we can see them.
# This will reprint them whenever any of the variables change.
print(
"\n", "Query:", query_component.text, "\n", "Dir:", dir, "\n", "Index:", index, "\n"
)
page = Page(
# Layout the Interface components one row each, and launch the page.
html.flexcol(
[
# fileupload_component,
Header("GPT-Index Demo"),
Subheader("Directory Path"),
Caption(
"It should contain a few files to build the GPT-Index. "
f"Defaults to {dir}."
),
dir_component,
Subheader("Query"),
Caption("Ask a question!"),
query_component,
button,
text,
]
),
id="gpt-index",
)
page.launch()
|
meerkat-main
|
demo/gpt-index.py
|
"""Filter and sort an image dataset in a gallery.
This is a tutorial on how to compose complex data components like
`Filter`, `Sort`, and `Gallery` in Meerkat in order to create an
application.
"""
import meerkat as mk
df = mk.get("imagenette", version="160px")
filter = mk.gui.Filter(df)
sort = mk.gui.Sort(df)
gallery = mk.gui.Gallery(sort(filter(df)), main_column="img", tag_columns=["label"])
page = mk.gui.Page(
mk.gui.html.div(
[filter, sort, gallery],
classes="h-full grid grid-rows-[auto,auto,1fr] p-3",
),
id="filter-sort",
)
page.launch()
|
meerkat-main
|
demo/tutorial-filter-sort.py
|
"""Similarity search with CLIP in an interactive image gallery, with advanced
controls for user feedback."""
import rich
import meerkat as mk
from meerkat.interactive.app.src.lib.component.contrib import GalleryQuery
IMAGE_COLUMN = "img"
EMBED_COLUMN = "img_clip"
rich.print(
"[bold red]This script uses CLIP to embed images. "
"This will take some time to download the model. "
"Please be patient.[/bold red]"
)
df = mk.get("imagenette", version="160px")
# Download the precomputed CLIP embeddings for imagenette.
# You can also embed the images yourself with mk.embed. This will take some time.
# To embed: df = mk.embed(df, input=IMAGE_COLUMN, out_col=EMBED_COLUMN, encoder="clip").
df_clip = mk.DataFrame.read(
"https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/embeddings/imagenette_160px.mk.tar.gz", # noqa: E501
overwrite=False,
)
df = df.merge(df_clip[["img_id", "img_clip"]], on="img_id")
page = mk.gui.Page(
GalleryQuery(df, main_column=IMAGE_COLUMN, against=EMBED_COLUMN),
id="match",
)
page.launch()
|
meerkat-main
|
demo/match-2.py
|
"""Build a simple chat app with Meerkat."""
# TODO: improve the chat component
from datetime import datetime
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.chat import Chat
def get_time_elapsed(time):
# datetime.now() will be different for each cell, which may cause
# inconsistencies in the time elapsed column.
# TODO: Process the entire column at once.
# Probably won't be that noticeable on a minute scale.
time_elapsed = datetime.now() - time
# Days
days = time_elapsed.days
if days > 0:
return "{} day{} ago".format(days, "s" if days > 1 else "")
# Minutes
seconds = time_elapsed.seconds
if seconds > 60:
minutes = round(seconds / 60)
return "{} minute{} ago".format(minutes, "s" if minutes > 1 else "")
# Seconds
return "{} second{} ago".format(seconds, "s" if seconds > 1 else "")
def _empty_df():
df = mk.DataFrame(
{
"message": ["Hi! I'm a chatbot."],
"sender": ["chatbot"],
"name": ["ChatBot"],
"send_time": [datetime.now()],
}
)
df["time"] = mk.defer(df["send_time"], get_time_elapsed)
return df
@mk.endpoint()
def on_send(df: mk.DataFrame, message: str):
message_time = datetime.now()
new_df = mk.DataFrame(
{
"message": [message, "random message"],
"sender": ["user", "chatbot"],
"name": ["Karan", "ChatBot"],
"send_time": [message_time, datetime.now()],
}
)
new_df["time"] = mk.defer(new_df["send_time"], get_time_elapsed)
df.set(df.append(new_df))
df = _empty_df()
chat = Chat(
df=df,
img_chatbot="https://placeimg.com/200/200/animals",
img_user="https://placeimg.com/200/200/people",
on_send=on_send.partial(df=df),
)
page = mk.gui.Page(
chat,
id="chat",
)
page.launch()
|
meerkat-main
|
demo/chat.py
|
import meerkat as mk
IMAGE_COLUMN = "img"
EMBED_COLUMN = "img_clip"
@mk.endpoint()
def append_to_sort(match_criterion, criteria: mk.Store):
"""Add match criterion to the sort criteria.
Args:
match_criterion: The criterion to add.
This is returned by the match component.
criteria: The current sort criteria.
This argument should be bound to a store that will be modified.
"""
SOURCE = "match"
criterion = mk.gui.Sort.create_criterion(
column=match_criterion.name, ascending=False, source=SOURCE
)
criteria.set([criterion] + [c for c in criteria if c.source != SOURCE])
df = mk.get("imagenette", version="160px")
# Download the precomputed CLIP embeddings for imagenette.
# You can also embed the images yourself with mk.embed. This will take some time.
# To embed: df = mk.embed(df, input=IMAGE_COLUMN, out_col=EMBED_COLUMN, encoder="clip").
df_clip = mk.DataFrame.read(
"https://huggingface.co/datasets/arjundd/meerkat-dataframes/resolve/main/embeddings/imagenette_160px.mk.tar.gz", # noqa: E501
overwrite=False, # set overwrite=True to download the embeddings again.
)
df_clip = df_clip[["img_id", "img_clip"]]
df = df.merge(df_clip, on="img_id")
# with mk.gui.reactive():
# Match
sort_criteria = mk.Store([])
match = mk.gui.Match(
df,
against=EMBED_COLUMN,
title="Search Examples",
on_match=append_to_sort.partial(criteria=sort_criteria),
)
df = match(df)[0]
# Filter
filter = mk.gui.Filter(df)
df = filter(df)
sort = mk.gui.Sort(df, criteria=sort_criteria, title="Sort Examples")
df = sort(df)
gallery = mk.gui.Gallery(df, main_column="img")
mk.gui.start(shareable=False)
page = mk.gui.Page(
mk.gui.html.flexcol(
[
match,
mk.gui.html.flexcol(slots=[filter, sort], classes="grid-cols-2 gap-2"),
gallery,
],
classes="grid-cols-1 gap-2",
),
id="image-search",
)
page.launch()
|
meerkat-main
|
demo/image-search.py
|
"""Run ChatGPT with Meerkat's Chat page.
Requirements:
- Download manifest: `pip install manifest-ml[chatgpt]`
- Follow instructions to get your ChatGPT session key:
https://github.com/HazyResearch/manifest
Run:
CHATGPT_SESSION_KEY="your_session_key" python chatgpt.py
"""
import os
from datetime import datetime
from manifest import Manifest
import meerkat as mk
from meerkat.interactive.app.src.lib.component.core.chat import Chat
manifest = Manifest(
client_name="chatgpt",
client_connection=os.environ.get("CHATGPT_SESSION_KEY"),
)
def get_time_elapsed(time):
# datetime.now() will be different for each cell, which may cause
# inconsistencies in the time elapsed column.
# TODO: Process the entire column at once.
# Probably won't be that noticeable on a minute scale.
time_elapsed = datetime.now() - time
# Days
days = time_elapsed.days
if days > 0:
return "{} day{} ago".format(days, "s" if days > 1 else "")
# Minutes
seconds = time_elapsed.seconds
if seconds > 60:
minutes = round(seconds / 60)
return "{} minute{} ago".format(minutes, "s" if minutes > 1 else "")
# Seconds
return "{} second{} ago".format(seconds, "s" if seconds > 1 else "")
def _empty_df():
df = mk.DataFrame(
{
"message": ["Hi! I'm a chatbot."],
"sender": ["chatbot"],
"name": ["ChatBot"],
"send_time": [datetime.now()],
}
)
df["time"] = mk.defer(df["send_time"], get_time_elapsed)
return df
@mk.endpoint()
def on_send(df: mk.DataFrame, message: str):
message_time = datetime.now()
chatbot = manifest.run(message)
new_df = mk.DataFrame(
{
"message": [message, chatbot],
"sender": ["user", "chatbot"],
"name": ["Karan", "ChatBot"],
"send_time": [message_time, datetime.now()],
}
)
new_df["time"] = mk.defer(new_df["send_time"], get_time_elapsed)
df.set(df.append(new_df))
df = _empty_df()
chat = Chat(
df=df,
img_chatbot="https://placeimg.com/200/200/animals",
img_user="https://placeimg.com/200/200/people",
on_send=on_send.partial(df=df),
)
page = mk.gui.Page(
component=chat,
id="chat",
)
page.launch()
|
meerkat-main
|
demo/chatgpt.py
|
"""Flash-fill.
Run this script with:
OPENAI_API_KEY=<your-openai-key> `mk run flash-fill.py`.
Requirements:
pip install manifest-ml
"""
import os
import meerkat as mk
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ARXIV_DATASET_JSON = os.getenv("ARXIV_DATASET_JSON")
PDF_CACHE = os.getenv("PDF_CACHE", "~/.meerkat/cache/arxiv-dataset/pdfs")
if not ARXIV_DATASET_JSON:
raise ValueError(
"Please download the ArXiv Dataset from:\n"
"https://www.kaggle.com/datasets/Cornell-University/arxiv/versions/5?resource=download\n" # noqa: E501
"and set the environment variable `ARXIV_DATASET_JSON` "
"to the path of the snapshot file."
)
ARXIV_DATASET_JSON = os.path.abspath(os.path.expanduser(ARXIV_DATASET_JSON))
PDF_CACHE = os.path.abspath(os.path.expanduser(PDF_CACHE))
filepath = ARXIV_DATASET_JSON
df = mk.from_json(filepath=filepath, lines=True, backend="arrow")
df = df[df["categories"].str.contains("stat.ML")]
df = mk.from_pandas(df.to_pandas(), primary_key="id")
df["url"] = "https://arxiv.org/pdf/" + df["id"]
df["pdf"] = mk.files(df["url"], cache_dir=PDF_CACHE, type="pdf")
df = df[["id", "authors", "title", "abstract", "pdf"]]
df["answer"] = ""
page = mk.gui.Page(
component=mk.gui.contrib.FlashFill(df=df, target_column="answer"),
id="flash-fill",
progress=False,
)
page.launch()
|
meerkat-main
|
demo/flash-fill.py
|
"""Query a large language model (LLM) with a question and get an answer.
This is a tutorial on creating an `endpoint` in Meerkat that responds to
a button click.
"""
import os
from manifest import Manifest
import meerkat as mk
manifest = Manifest(
client_name="openai",
client_connection=os.getenv("OPENAI_API_KEY"),
engine="text-davinci-003",
)
textbox = mk.gui.Textbox()
@mk.endpoint()
def get_answer(answer: mk.Store, question: str):
response = manifest.run(question, max_tokens=100)
return answer.set(response)
answer = mk.Store("")
button = mk.gui.Button(
title="Ask an LLM 🗣️",
on_click=get_answer.partial(answer=answer, question=textbox.text),
)
page = mk.gui.Page(
mk.gui.html.div(
[textbox, button, mk.gui.Markdown(answer)],
classes="flex flex-col m-3 items-center gap-1",
),
id="query-llm",
)
page.launch()
|
meerkat-main
|
demo/tutorial-query-llm.py
|
"""An assortment of Flowbite components."""
import meerkat as mk
from meerkat.interactive import flowbite as fb
from meerkat.interactive import html as html
@mk.endpoint()
def on_click():
print("Clicked!")
button = fb.Button(
slots="Click me",
on_click=on_click,
)
card = fb.Card(
slots=[
mk.gui.html.flex(
slots=[button, "My Custom Card"],
classes="flex-row items-center justify-between",
)
],
color="red",
rounded=True,
size="lg",
)
accordion = fb.Accordion(
slots=[
fb.AccordionItem(
slots=[
html.span(slots="My Header 1"),
html.p(
slots=[
"Lorem ipsum dolor sit amet, consectetur adipisicing elit. "
"Illo ab necessitatibus sint explicabo ..."
"Check out this guide to learn how to ",
html.a(
slots="get started",
href="https://tailwindcss.com/docs",
target="_blank",
rel="noreferrer",
classes="text-blue-600 dark:text-blue-500 hover:underline",
),
" and start developing websites even faster with components "
"on top of Tailwind CSS.", # noqa: E501
],
classes="mb-2 text-gray-500 dark:text-gray-400",
),
]
),
fb.AccordionItem(
slots=[
html.span(slots="My Header 2"),
html.p(
slots=[
"Lorem ipsum dolor sit amet, consectetur adipisicing elit. "
"Illo ab necessitatibus sint explicabo ...",
"Lorem ipsum dolor sit amet, consectetur adipisicing elit. "
"Illo ab necessitatibus sint explicabo ...",
"Learn more about these technologies:",
html.ul(
slots=[
html.li(
slots=html.a(
slots="Lorem ipsum",
href="https://tailwindcss.com/docs",
target="_blank",
rel="noreferrer",
classes="text-blue-600 dark:text-blue-500 hover:underline", # noqa: E501
)
),
html.li(
slots=html.a(
slots="Tailwind UI",
href="https://tailwindui.com/",
target="_blank",
rel="noreferrer",
classes="text-blue-600 dark:text-blue-500 hover:underline", # noqa: E501
)
),
],
classes="list-disc pl-5 dark:text-gray-400 text-gray-500",
),
],
classes="mb-2 text-gray-500 dark:text-gray-400",
),
]
),
]
)
alert = fb.Alert(
slots=[
html.span(
slots=[
html.svg(
slots=[
html.path(
fill_rule="evenodd",
d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zm-7-4a1 1 0 11-2 0 1 1 0 012 0zM9 9a1 1 0 000 2v3a1 1 0 001 1h1a1 1 0 100-2v-3a1 1 0 00-1-1H9z", # noqa: E501
clip_rule="evenodd",
)
],
aria_hidden="true",
classes="w-5 h-5",
fill="currentColor",
viewBox="0 0 20 20",
)
],
# slot="icon",
),
html.span(slots="This is a info alert"),
html.div(
slots=[
html.div(
slots=[
"More info about this info alert goes here. This example text "
"is going to run a bit longer so that you can see how spacing "
"within an alert works with this kind of content.",
],
classes="mt-2 mb-4 text-sm",
),
html.div(
slots=[
fb.Button(
slots=[
html.svg(
slots=[
html.path(d="M10 12a2 2 0 100-4 2 2 0 000 4z"),
html.path(
d="M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z", # noqa: E501
fill_rule="evenodd",
clip_rule="evenodd",
),
],
classes="-ml-0.5 mr-2 h-4 w-4",
fill="currentColor",
viewBox="0 0 20 20",
aria_hidden="true",
),
"View more",
],
size="xs",
),
fb.Button(
slots="Go to Home",
size="xs",
outline=True,
color="blue",
btnClass="dark:!text-blue-800",
),
],
classes="flex gap-2",
),
],
classes="mt-4",
),
],
color="blue",
)
avatar = html.div(
slots=[
html.div(
slots=[
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(stacked=True),
],
classes="flex mb-5",
),
html.div(
slots=[
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(
src="http://placekitten.com/200/200",
stacked=True,
classes="mr-2",
),
fb.Avatar(
stacked=True,
href="/",
classes="bg-gray-700 text-white hover:bg-gray-600 text-sm",
slots="+99",
),
],
classes="flex",
),
],
classes="flex flex-col",
)
carousel = html.div(
slots=[
fb.Carousel(
images=[
{
"id": "1",
"imgurl": "http://placeimg.com/640/480/nature",
"name": "Image 1",
},
{
"id": "2",
"imgurl": "http://placeimg.com/640/480/animals",
"name": "Image 2",
},
{
"id": "3",
"imgurl": "http://placeimg.com/640/480/tech",
"name": "Image 3",
},
],
)
],
classes="max-w-4xl",
)
toast = fb.Toast(
slots=[
html.div(
slots=[
html.div(
slots=[
html.svg(
slots=[
html.path(
d="M20.25 7.5l-.625 10.632a2.25 2.25 0 01-2.247 2.118H6.622a2.25 2.25 0 01-2.247-2.118L3.75 7.5M10 11.25h4M3.375 7.5h17.25c.621 0 1.125-.504 1.125-1.125v-1.5c0-.621-.504-1.125-1.125-1.125H3.375c-.621 0-1.125.504-1.125 1.125v1.5c0 .621.504 1.125 1.125 1.125z", # noqa: E501
stroke_linecap="round",
stroke_linejoin="round",
stroke_width="1.5",
stroke="currentColor",
fill="none",
classes="w-6 h-6",
),
],
viewBox="0 0 24 24",
),
],
classes="flex-shrink-0",
),
html.div(
slots="Default color is blue.",
classes="ml-3",
),
],
classes="flex",
),
],
classes="mb-2",
open=True,
position="top-left",
)
heading = html.h1(slots="Flowbite Demo", classes="text-4xl")
page = mk.gui.Page(
mk.gui.html.flexcol([heading, card, accordion, alert, avatar, carousel, toast]),
id="flowbite",
)
page.launch()
|
meerkat-main
|
demo/flowbite.py
|
"""Filter an image dataset in a gallery."""
import meerkat as mk
df = mk.get("imagenette", version="160px")
filter = mk.gui.Filter(df)
df = filter(df)
gallery = mk.gui.Gallery(df, main_column="img")
page = mk.gui.Page(
mk.gui.html.flexcol([filter, gallery]),
id="filter",
)
page.launch()
|
meerkat-main
|
demo/filter.py
|
"""Display a data frame in an interactive table."""
import meerkat as mk
df = mk.get("imagenette", version="160px")
table = mk.gui.Table(df)
page = mk.gui.Page(table, id="table")
page.launch()
|
meerkat-main
|
demo/table.py
|
"""Flash-fill."""
import os
from functools import partial
from manifest import Manifest
import meerkat as mk
from meerkat.dataframe import Batch
manifest = Manifest(
client_name="openai",
client_connection=os.getenv("OPENAI_API_KEY"),
)
def complete_prompt(row, example_template: mk.Store[str]):
assert isinstance(row, dict)
output = example_template.format(**row)
return output
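# For example, with the default example template defined below, the row
# {"guest": "Col. Mustard", "hometown": "London", "relationship": "father"}
# renders as "Guest name: Col. Mustard, hometown: London; Result: Col. Mustard".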
df = mk.DataFrame(
{
"guest": [
"Col. Mustard",
"Mrs. White",
"Mr. Green",
"Mrs. Peacock",
"Prof. Plum",
],
"hometown": ["London", "Stockholm", "Bath", "New York", "Paris"],
"relationship": ["father", "aunt", "cousin", "teacher", "brother"],
"_status": ["train", "train", "train", "fill", "fill"],
}
)
@mk.endpoint()
def run_manifest(instruct_cmd: str, df: mk.DataFrame, output_col: str):
def _run_manifest(example: Batch):
# Format instruct-example-instruct prompt.
return manifest.run(
[f"{instruct_cmd} {in_context_examples} {x}" for x in example]
)
# Concat all of the in-context examples.
train_df = df[df["_status"] == "train"]
in_context_examples = "\n".join(train_df["example"]())
fill_df_index = df["_status"] == "fill"
fill_df = df[fill_df_index]
# Filter based on train/abstain/fill
# Only fill in the blanks.
col = output_col
flash_fill = mk.map(
fill_df, function=_run_manifest, is_batched_fn=True, batch_size=4
)
# If the dataframe does not have the column, add it.
if col not in df.columns:
df[col] = ""
df[col][fill_df_index] = flash_fill
df.set(df)
@mk.reactive()
def update_df_with_example_template(df: mk.DataFrame, template: mk.Store[str]):
"""Update the df with the new prompt template.
This is as simple as returning a view.
"""
df = df.view()
# Extract out the example template from the prompt template.
df["example"] = mk.defer(
df, function=partial(complete_prompt, example_template=template)
)
return df
output_col_area = mk.gui.Textbox("", placeholder="Column name here...")
output_col = output_col_area.text
@mk.endpoint()
def set_code(code: mk.Store[str], new_code: str):
code.set(new_code)
instruction_editor = mk.gui.Editor(code="give me the name of my guest")
example_template_editor = mk.gui.Editor(
code="Guest name: {guest}, hometown: {hometown}; Result: {guest}"
)
instruction_cmd = instruction_editor.code
mk.gui.print("Instruction commmand:", instruction_cmd)
mk.gui.print("Example template:", example_template_editor.code)
example_template = example_template_editor.code
df_view = update_df_with_example_template(df, example_template)
table = mk.gui.Gallery(df_view, main_column="guest")
run_manifest_button = mk.gui.Button(
title="Run Manifest",
on_click=run_manifest.partial(
instruct_cmd=instruction_cmd, df=df_view, output_col=output_col
),
)
page = mk.gui.Page(
mk.gui.html.flexcol(
[
mk.gui.html.flex(
[
mk.gui.Caption("Output Column"),
output_col_area,
],
classes="items-center gap-4 mx-4",
),
mk.gui.html.flex(
[
# Make the items fill out the space.
mk.gui.html.flexcol(
[
mk.gui.Caption("Instruction Template"),
instruction_editor,
],
classes="flex-1 gap-1",
),
mk.gui.html.flexcol(
[
mk.gui.Caption("Example Template"),
example_template_editor,
],
classes="flex-1 gap-1",
),
],
classes="w-full gap-4 mx-4",
),
run_manifest_button,
table,
],
classes="gap-4",
),
id="flash-fill",
)
page.launch()
|
meerkat-main
|
demo/prompt.py
|
"""A demo for segmenting images with the Segment Anything Model (SAM).
This demo requires access to the Segment Anything Model (SAM) model.
https://github.com/facebookresearch/segment-anything
"""
# flake8: noqa: E402
import os
from typing import List
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import matplotlib.pyplot as plt
import numpy as np
import PIL
from PIL import Image as PILImage
from segment_anything import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
import meerkat as mk
from meerkat.interactive.formatter.image import ImageFormatterGroup
# Initialize SAM model
model_type = "vit_b"
sam_checkpoint = os.path.expanduser(
"~/.cache/segment_anything/models/sam_vit_b_01ec64.pth"
)
device = "mps"
sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
sam.to(device=device)
predictor = SamPredictor(sam)
@mk.reactive()
def prepare_image(image):
"""Prepare the image for the SAM model."""
if isinstance(image, PIL.Image.Image):
image = np.asarray(image)
# Grayscale -> RGB.
if image.ndim == 2:
image = np.stack([image] * 3, axis=-1)
elif image.ndim == 3 and image.shape[-1] == 1:
image = np.concatenate([image, image, image], axis=-1)
return np.asarray(image)
@mk.reactive()
def predict(image, boxes: mk.Store[List], segmentations: mk.Store[List]):
"""Segment the image with the SAM model.
When ``boxes`` changes, this function will be called to segment the image.
The segmentation will be appended to the segmentations list.
This is a bit hacky, but it should work.
Note:
Because this will run any time ``boxes`` changes, it will also run
when boxes are deleted, which can lead to unexpected behavior.
Returns:
np.ndarray: The colorized segmentation mask.
"""
image = prepare_image(image)
predictor.set_image(np.asarray(image))
combined_segmentations = []
if len(boxes) > 0:
# Assume the last box is the most recently added box.
box = boxes[-1]
box_array = np.asarray(
[box["x"], box["y"], box["x"] + box["width"], box["y"] + box["height"]]
)
masks, scores, _ = predictor.predict(box=box_array)
best_idx = np.argsort(scores)[-1]
predictions = [(masks[best_idx], box["category"])]
segmentations.extend(predictions)
combined_segmentations.extend(segmentations)
# Do this to trigger the reactive function.
# segmentations.set(segmentations)
return mk.Store(combined_segmentations, backend_only=True)
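# Illustrative note (not part of the original demo): each entry of ``boxes`` is
# assumed to be a dict produced by the ImageAnnotator, e.g. (hypothetical values)
#   {"x": 10.0, "y": 20.0, "width": 100.0, "height": 80.0, "category": "femur"}
# which the function above converts to SAM's (x0, y0, x1, y1) corner format
# before calling predictor.predict(box=...).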
@mk.reactive()
def predict_point(
image, points: mk.Store[List], segmentations: mk.Store[List], selected_category: str
):
"""Segment the image with the SAM model.
When ``boxes`` changes, this function will be called to segment the image.
The segmentation will be appended to the segmentations list.
This is a bit hacky, but it should work.
Note:
Because this will run any time ``boxes`` changes, it will also run
when boxes are deleted, which can lead to unexpected behavior.
Returns:
np.ndarray: The colorized segmentation mask.
"""
image = prepare_image(image)
predictor.set_image(np.asarray(image))
combined_segmentations = []
if len(points) > 0:
points = np.asarray([[point["x"], point["y"]] for point in points])
# Assume that all points are foreground points.
point_labels = np.ones(len(points))
masks, scores, _ = predictor.predict(
point_coords=points, point_labels=point_labels
)
best_idx = np.argsort(scores)[-1]
# If there is already a prediction for this category,
# we assume that we are operating on this prediction.
# Remove this mask and add the new one.
# TODO: Use this mask as a prompt for the prediction.
categories = [seg[1] for seg in segmentations]
if selected_category in categories:
segmentations[categories.index(selected_category)] = (
masks[best_idx],
selected_category,
)
else:
predictions = [(masks[best_idx], selected_category)]
segmentations.extend(predictions)
combined_segmentations.extend(segmentations)
# Do this to trigger the reactive function.
# segmentations.set(segmentations)
return mk.Store(combined_segmentations, backend_only=True)
@mk.reactive()
def clear_points(points: List, selected_category: str):
"""When the selected category changes, clear the points."""
# We do not want to trigger segmentation to re-run here.
# So we use a hacky solution and clear the points.
points.clear()
return points
@mk.reactive()
def get_categories(segmentations: List[str]):
return [seg[1] for seg in segmentations]
@mk.reactive()
def get_img_and_annotations(idx: int):
row = images[idx]
img = row["image"]()
segmentations = row["segmentation"]
boxes = row["boxes"]
points = row["points"]
# Pushing up large arrays to the frontend is slow.
# Only maintain the image and segmentations on the backend.
# The serialized version of the image and segmentations will be sent to the frontend.
img = mk.Store(img, backend_only=True)
segmentations = mk.Store(segmentations, backend_only=True)
boxes = mk.Store(boxes, backend_only=False)
points = mk.Store(points, backend_only=False)
return mk.Store((img, segmentations, boxes, points), backend_only=True)
@mk.endpoint()
def increment(idx: mk.Store[int], annotator: mk.gui.ImageAnnotator):
idx.set(min(idx + 1, len(images) - 1))  # clamp to the last image index
annotator.clear_annotations()
return idx
@mk.endpoint()
def decrement(idx: mk.Store[int], annotator: mk.gui.ImageAnnotator):
idx.set(max(idx - 1, 0))
annotator.clear_annotations()
return idx
# Hacky way to get the annotations written to the dataframe
# with reactive statements.
@mk.reactive()
def update_annotations(idx: int, ann_type: str, annotations: List[str]):
images[ann_type][idx] = annotations
# Build the dataframe.
files = [
# "https://kulapartners.com/wp-content/uploads/2017/06/multiple-personas-hero.jpg",
"https://3acf3052-cdn.agilitycms.cloud/images/service/KNEE%20SAGITTAL.jpg",
# "https://www.mercurynews.com/wp-content/uploads/2022/01/BNG-L-WARRIORS-0122-28.jpg?w=1024"
]
images = mk.DataFrame({"image": mk.files(files, type="image")})
# Add an empty list for each of the annotations.
# This is a simple way of managing to keep track of the annotations in the dataframe.
images["segmentation"] = [[] for _ in range(len(images))]
images["boxes"] = [[] for _ in range(len(images))]
images["points"] = [[] for _ in range(len(images))]
idx = mk.Store(0, backend_only=True)
selected_category = mk.Store("")
img, segmentations, boxes, points = get_img_and_annotations(idx)
update_annotations(idx, "boxes", boxes)
update_annotations(idx, "points", points)
points = clear_points(points=points.unmark(), selected_category=selected_category)
box_segmentations = predict(
image=img,
boxes=boxes,
segmentations=segmentations,
)
point_segmentations = predict_point(
image=img,
points=points,
segmentations=segmentations,
selected_category=selected_category.unmark(),
)
with mk.magic():
combined_segmentations = box_segmentations + point_segmentations
# Image annotator.
selected_category.mark()
categories = get_categories(segmentations)
annotator = mk.gui.ImageAnnotator(
img,
categories=[], # categories["name"].tolist(),
segmentations=combined_segmentations,
points=points,
boxes=boxes,
selected_category=selected_category,
)
# Layout.
component = mk.gui.html.gridcols3(
[
mk.gui.Button(
title="",
icon="ChevronLeft",
on_click=decrement.partial(idx=idx, annotator=annotator),
),
annotator,
mk.gui.Button(
title="",
icon="ChevronRight",
on_click=increment.partial(idx=idx, annotator=annotator),
),
],
classes="h-screen grid-cols-[auto_6fr_auto]",
)
page = mk.gui.Page(component, id="SAM")
page.launch()
|
meerkat-main
|
demo/sam.py
|
import meerkat as mk
from meerkat.interactive.formatter.raw_html import HTMLFormatterGroup
df = mk.get("olivierdehaene/xkcd", registry="huggingface")["train"]
for col in df.columns:
df[col] = df[col].to_numpy()
df["webpage"] = mk.files(df["url"]).format(HTMLFormatterGroup().defer())
filter = mk.gui.Filter(df)
filtered_df = filter(df)
gallery = mk.gui.html.div(
[
filter,
mk.gui.Gallery(
filtered_df.format(
{
"explained_url": HTMLFormatterGroup(),
}
),
main_column="image_url",
),
],
classes="flex flex-col h-[80vh]",
)
page = mk.gui.Page(component=gallery, id="xkcd")
page.launch()
|
meerkat-main
|
demo/xkcd.py
|
"""Similarity search with CLIP in an interactive image gallery."""
import rich
import meerkat as mk
IMAGE_COLUMN = "img"
EMBED_COLUMN = "img_clip"
rich.print(
"[bold red]This script uses CLIP to embed images. "
"This will take some time to download the model. "
"Please be patient.[/bold red]"
)
df = mk.get("imagenette", version="160px")
# Download the precomputed CLIP embeddings for imagenette.
# You can also embed the images yourself with mk.embed. This will take some time.
# To embed: df = mk.embed(df, input=IMAGE_COLUMN, out_col=EMBED_COLUMN, encoder="clip").
df_clip = mk.DataFrame.read(
"https://huggingface.co/datasets/meerkat-ml/meerkat-dataframes/resolve/main/embeddings/imagenette_160px.mk.tar.gz", # noqa: E501
overwrite=False,
)
df = df.merge(df_clip[["img_id", "img_clip"]], on="img_id")
match = mk.gui.Match(df, against=EMBED_COLUMN)
df = df.mark()
with mk.magic():
# Get the name of the match criterion in a reactive way.
criterion_name = match.criterion.name
df_sorted = mk.sort(df, by=criterion_name, ascending=False)
gallery = mk.gui.Gallery(df_sorted, main_column=IMAGE_COLUMN)
page = mk.gui.Page(
mk.gui.html.flexcol([match, gallery]),
id="match",
)
page.launch()
|
meerkat-main
|
demo/match.py
|
meerkat-main
|
demo/together/__init__.py
|
|
meerkat-main
|
demo/together/main_together.py
|
|
"""A basic chatbot implementation that uses Meerkat to create a chatbot and
manage its state, and MiniChain to perform prompting.
Borrows code from [MiniChain](https://github.com/srush/minichain)'s
chatbot example.
"""
import json
import os
import pathlib
from dataclasses import dataclass
from datetime import datetime
from typing import List, Tuple
import jinja2
import minichain
import rich
import meerkat as mk
CHATBOT = "chatbot"
USER = "user"
@dataclass
class Memory:
"""Generic stateful memory."""
memory: List[Tuple[str, str]]
size: int = 2
human_input: str = ""
def push(self, response: str) -> "Memory":
memory = self.memory if len(self.memory) < self.size else self.memory[1:]
self.memory = memory + [(self.human_input, response)]
return self
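# Illustrative sketch (not part of the original demo) of how the bounded memory
# behaves with size=2; the strings below are hypothetical:
#   m = Memory([], size=2)
#   m.human_input = "hi";    m = m.push("hello")   # [("hi", "hello")]
#   m.human_input = "how?";  m = m.push("fine")    # [("hi", "hello"), ("how?", "fine")]
#   m.human_input = "bye";   m = m.push("later")   # oldest pair dropped, still 2 entries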
# Get path to this file
PATH = pathlib.Path(__file__).parent.absolute()
class ChatPrompt(minichain.TemplatePrompt):
"""Chat prompt with memory."""
template = jinja2.Environment(
loader=jinja2.FileSystemLoader(str(PATH))
).get_template("chatbot.pmpt.tpl")
def parse(self, out: str, inp: Memory) -> Memory:
result = out.split("Assistant:")[-1]
return inp.push(result)
class ConversationHistory:
"""Stores the full conversation history, and keeps track of the agent's
memory to use for prompting."""
def __init__(
self,
greeting: str = "Hi! Welcome to Meerkat!",
chatbot_name: str = "Meerkat",
savepath: str = "history.jsonl",
):
# Create a data frame with a single message from the chatbot.
df = mk.DataFrame(
{
"message": [greeting],
"sender": [CHATBOT],
"name": [chatbot_name],
"time": [self.timestamp(datetime.now())],
},
)
self.df = df
# Create a memory component that will be used to do prompting.
self.memory = Memory([], size=2)
# Store the history in a jsonl file.
self.savepath = savepath
if os.path.exists(savepath):
self.df = mk.DataFrame.from_json(savepath, lines=True)
self.restore_memory()
else:
self.write_last_message()
def restore_memory(self):
# Restore the memory by looking back at the last
# 2 * memory.size messages (if possible)
if len(self.df) < 2 * self.memory.size:
lookback = (len(self.df) // 2) * 2
else:
lookback = self.memory.size * 2
window = self.df[-lookback:]
self.memory = Memory(
[
(window[i]["message"], window[i + 1]["message"])
for i in range(0, len(window), 2)
]
)
@staticmethod
def timestamp(time: datetime):
# Formats as 04:20 PM / Jan 01, 2020
return time.strftime("%I:%M %p / %b %d, %Y")
def update(self, message: str, sender: str, name: str, send_time: datetime):
df = mk.DataFrame(
{
"message": [message],
"sender": [sender],
"name": [name],
"time": [self.timestamp(send_time)],
},
)
# THIS IS ESSENTIAL!
# Running a df.set will automatically trigger a re-render on the
# frontend, AS LONG AS this method is called inside an `mk.endpoint`!
# Otherwise, this will behave like a normal Python method.
self.df.set(self.df.append(df))
self.write_last_message()
def write_last_message(self):
# Write the last message.
with open(self.savepath, "a") as f:
f.write(json.dumps(self.df[-1]) + "\n")
def get_chatbot_response(prompt: ChatPrompt, memory: Memory, message: str) -> str:
"""Run the minichain prompt, update the state, and get the response."""
memory.human_input = message
memory = prompt(memory)
response = memory.memory[-1][1]
return response
@mk.endpoint()
def run_chatbot(
history: "ConversationHistory",
message: str,
prompt: ChatPrompt,
chatbot_name: str = "Meerkat",
user_name: str = "Me",
):
"""Update the conversation history and get a response from the chatbot."""
history.update(message, USER, user_name, datetime.now())
response = get_chatbot_response(prompt, history.memory, message)
history.update(response, CHATBOT, chatbot_name, datetime.now())
# A pure Python component in Meerkat just needs to subclass `div`!
class BasicChatbot(mk.gui.html.div):
"""A basic chatbot component."""
# Expose the chat Component that is used internally
chat: mk.gui.Chat = None
# Expose the chat history
history: ConversationHistory = None
# Expose the `on_send` endpoint of the `Chat` component.
on_send: mk.gui.Endpoint = None
def __init__(
self,
model: str = "curie",
chatbot_name: str = "🤖 Meerkat",
user_name: str = "Me",
img_chatbot: str = "http://meerkat.wiki/favicon.png",
img_user: str = "http://placekitten.com/200/200",
savepath: str = "history.jsonl",
classes: str = "h-full flex flex-col",
):
# Spin up the prompt engine with `minichain`.
backend = minichain.start_chain("chatbot")
prompt = ChatPrompt(backend.OpenAI(model=model))
# Keep track of the conversation history.
# Also contains a memory that is used for prompting.
history = ConversationHistory(
chatbot_name=chatbot_name,
savepath=savepath,
)
# This endpoint takes in one remaining argument, `message`, which
# is the message sent by the user. It then updates the conversation
# history, and gets a response from the chatbot.
on_send: mk.gui.Endpoint = run_chatbot.partial(
history=history,
prompt=prompt,
chatbot_name=chatbot_name,
user_name=user_name,
)
# Create the chat component
chat = mk.gui.Chat(
df=history.df,
img_chatbot=img_chatbot,
img_user=img_user,
on_send=on_send,
)
# Make a little header on top of the chat component.
header = mk.gui.html.div(
mk.gui.Caption(
f"Chatbot running `{model}`, and built using "
"🔮 [Meerkat](http://meerkat.wiki)."
),
classes="bg-gray-50 dark:bg-slate-300 mx-8 px-2 w-fit rounded-t-lg",
)
# Call the constructor for `div`, which we wrap around
# any components made here.
super().__init__([header, chat], classes=classes)
# Store stuff
self.chat = chat
self.history = history
self.on_send = on_send
def test_basic_chatbot():
"""Test the basic chatbot component by simulating a conversation and
checking that the memory is updated."""
# Create a basic chatbot
chatbot = BasicChatbot("curie")
# Simulate a conversation and check the history.
chatbot.on_send.run(message="Who are you?")
rich.print(chatbot.history.memory)
chatbot.on_send.run(message="How are you?")
rich.print(chatbot.history.memory)
chatbot.on_send.run(message="What is your name?")
rich.print(chatbot.history.memory)
|
meerkat-main
|
demo/chatbot/chatbot_basic_minichain.py
|
import json
import os
import shutil
from datetime import datetime
import openai
import meerkat as mk
from demo.chatbot.characters import CHARACTERS
CHATBOT = "chatbot"
USER = "user"
class ConversationHistory:
"""Stores the full conversation history, and keeps track of the agent's
memory to use for prompting."""
def __init__(
self,
greeting: str = "Hi! Welcome to Meerkat!",
chatbot_name: str = "Meerkat",
savepath: str = "history.jsonl",
):
# Create a data frame with a single message from the chatbot.
df = mk.DataFrame(
{
"message": [greeting],
"sender": [CHATBOT],
"name": [chatbot_name],
"time": [self.timestamp(datetime.now())],
},
)
self.df = df
# Store the history in a jsonl file.
self.savepath = savepath
if os.path.exists(savepath):
self.df = mk.DataFrame.from_json(savepath, lines=True)
else:
self.write_last_message()
@staticmethod
def timestamp(time: datetime):
# Formats as 04:20 PM / Jan 01, 2020
return time.strftime("%I:%M %p / %b %d, %Y")
def update(self, message: str, sender: str, name: str, send_time: datetime):
df = mk.DataFrame(
{
"message": [message],
"sender": [sender],
"name": [name],
"time": [self.timestamp(send_time)],
},
)
# THIS IS ESSENTIAL!
# Running a df.set will automatically trigger a re-render on the
# frontend, AS LONG AS this method is called inside an `mk.endpoint`!
# Otherwise, this will behave like a normal Python method.
self.df.set(self.df.append(df))
self.write_last_message()
def write_last_message(self):
# Write the last message.
with open(self.savepath, "a") as f:
f.write(json.dumps(self.df[-1]) + "\n")
def get_chatbot_response(
history: ConversationHistory,
lookback: int,
instructions: str = "You are a helpful assistant.",
) -> str:
"""Run the OpenAI chat completion, using a subset of the chat history."""
assert lookback > 0, "Lookback must be greater than 0."
# Lookback, and rename columns to align with OpenAI's API.
messages = history.df[-lookback:].rename({"sender": "role", "message": "content"})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": instructions}]
+ [
message if message["role"] == USER else {**message, **{"role": "assistant"}}
for message in messages["role", "content"]
],
)
return response["choices"][0]["message"]["content"]
@mk.endpoint()
def run_chatbot(
history: ConversationHistory,
message: str,
lookback: int = 10,
instructions: str = "You are a helpful assistant.",
chatbot_name: str = "Meerkat",
user_name: str = "Me",
):
"""Update the conversation history and get a response from the chatbot."""
history.update(message, USER, user_name, datetime.now())
response = get_chatbot_response(history, lookback, instructions)
history.update(response, CHATBOT, chatbot_name, datetime.now())
# A pure Python component in Meerkat just needs to subclass `div`!
class CharacterChatbot(mk.gui.html.div):
"""A basic chatbot component."""
# Expose the chat Component that is used internally
chat: mk.gui.Chat = None
# Expose the chat history
history: ConversationHistory = None
# Expose the `on_send` endpoint of the `Chat` component.
on_send: mk.gui.Endpoint = None
def __init__(
self,
lookback: int = 10,
user_name: str = "Me",
img_user: str = "http://placekitten.com/200/200",
savepath: str = "history.jsonl",
classes: str = "h-full flex flex-col pb-12",
):
# Selector for character name
character_select = mk.gui.Select(
values=list(range(len(CHARACTERS))),
labels=[c["character"] for c in CHARACTERS],
value=0,
)
character = mk.reactive(lambda value: CHARACTERS[value])(character_select.value)
chatbot_name = mk.reactive(lambda character: "🤖 " + character["character"])(
character
)
instructions = mk.reactive(
lambda character: character["instructions"]
+ f" You must only answer as {character['character']},"
" and never refer to yourself as a language model."
" Make your responses as crazy and whimsical as possible."
)(character)
mk.gui.print(character)
# Keep track of the conversation history.
# Also contains a memory that is used for prompting.
history = mk.reactive(
lambda character: ConversationHistory(
greeting=character["greeting"],
chatbot_name=chatbot_name,
savepath=savepath.replace(".jsonl", f".{character['id']}.jsonl"),
)
)(character)
# This endpoint takes in one remaining argument, `message`, which
# is the message sent by the user. It then updates the conversation
# history, and gets a response from the chatbot.
on_send: mk.gui.Endpoint = run_chatbot.partial(
history=history,
lookback=lookback,
instructions=instructions,
chatbot_name=chatbot_name,
user_name=user_name,
)
# Create the chat component
with mk.magic():
chat = mk.gui.Chat(
df=history.df,
img_chatbot=character["img"],
img_user=img_user,
on_send=on_send,
)
# Make a little header on top of the chat component.
header = mk.gui.Caption(
"Character chat, built using 🔮 [Meerkat](http://meerkat.wiki).",
classes="self-center max-w-full bg-gray-50 dark:bg-slate-300"
" mx-8 px-2 w-fit text-center rounded-t-lg",
)
# Call the constructor for `div`, which we wrap around
# any components made here.
super().__init__([header, chat, character_select], classes=classes)
# Store stuff
self.chat = chat
self.history = history
self.on_send = on_send
def test_chatgpt():
"""Test the chatbot component by simulating a conversation."""
# Create chatbot
chatbot = CharacterChatbot(savepath="temp.history.jsonl")
# Simulate a conversation and check the history.
chatbot.on_send.run(message="Who are you?")
chatbot.on_send.run(message="How are you?")
chatbot.on_send.run(message="What is your name?")
# Remove the temp file (it is a regular file, so use os.remove rather than shutil.rmtree).
os.remove("temp.history.jsonl")
|
meerkat-main
|
demo/chatbot/chatbot_chatgpt_character.py
|
"""A ChatGPT demo, built in Meerkat."""
import os
import meerkat as mk
from demo.chatbot.chatbot_chatgpt import ChatGPT
FILEPATH = os.path.dirname(os.path.abspath(__file__))
chatgpt = ChatGPT(savepath=os.path.join(FILEPATH, "history.chatgpt.jsonl"))
page = mk.gui.Page(chatgpt, id="chatgpt")
page.launch()
|
meerkat-main
|
demo/chatbot/main_chatgpt.py
|
"""A lofi version of character.ai, built in Meerkat with ChatGPT."""
import os
import meerkat as mk
from demo.chatbot.chatbot_chatgpt_character import CharacterChatbot
FILEPATH = os.path.dirname(os.path.abspath(__file__))
charbot = CharacterChatbot(
savepath=os.path.join(FILEPATH, "history.charbot.jsonl"),
)
page = mk.gui.Page(charbot, id="charbot")
page.launch()
|
meerkat-main
|
demo/chatbot/main_chatgpt_character.py
|
# flake8: noqa
CHARACTERS = [
{
"id": "galileo",
"character": "Galileo Galilei",
"instructions": "You are Galileo Galilei, an Italian astronomer, physicist, and mathematician who played a major role in the Scientific Revolution. You made significant contributions to the fields of astronomy, physics, and engineering, and are known for your support of the Copernican theory of heliocentrism.",
"img": "https://upload.wikimedia.org/wikipedia/commons/c/cc/Galileo.arp.300pix.jpg",
"greeting": "Hello, I am Galileo Galilei, an Italian astronomer, physicist, and mathematician who played a major role in the Scientific Revolution.",
},
{
"id": "newton",
"character": "Isaac Newton",
"instructions": "You are Isaac Newton, an English mathematician, physicist, and astronomer who is widely regarded as one of the most influential scientists in history. You formulated the laws of motion and universal gravitation, and made significant contributions to the development of calculus.",
"img": "https://upload.wikimedia.org/wikipedia/commons/3/39/GodfreyKneller-IsaacNewton-1689.jpg",
"greeting": "Hello, I am Isaac Newton, an English mathematician, physicist, and astronomer who is widely regarded as one of the most influential scientists in history.",
},
{
"id": "marcopolo",
"character": "Marco Polo",
"instructions": "You are Marco Polo, an Italian merchant, explorer, and writer who traveled extensively throughout Asia in the 13th century. You wrote a book called 'The Travels of Marco Polo,' which became one of the most famous travelogues in history.",
"img": "https://upload.wikimedia.org/wikipedia/commons/5/54/Marco_Polo_portrait.jpg?20110104135540",
"greeting": "Hello, I am Marco Polo, an Italian merchant, explorer, and writer who traveled extensively throughout Asia in the 13th century.",
},
{
"id": "confucius",
"character": "Confucius",
"instructions": "You are Confucius, a Chinese philosopher and teacher who founded Confucianism, a system of thought and behavior that emphasizes moral values, social harmony, and respect for authority. You are widely regarded as one of the most important figures in Chinese history.",
"img": "https://upload.wikimedia.org/wikipedia/commons/9/9a/Confucius_the_scholar.jpg",
"greeting": "Hello, I am Confucius, a Chinese philosopher and teacher who founded Confucianism, a system of thought and behavior that emphasizes moral values, social harmony, and respect for authority.",
},
{
"id": "joanofarc",
"character": "Joan d'Arc (Jeanne d'Arc)",
"instructions": "You are Joan d'Arc (Jeanne d'Arc), a French military leader and heroine who played a significant role in the Hundred Years' War. You claimed that you received divine guidance from God, and led the French army to several important victories before being captured and executed by the English.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Joan_of_Arc_miniature_graded.jpg/402px-Joan_of_Arc_miniature_graded.jpg",
"greeting": "Hello, I am Joan d'Arc (Jeanne d'Arc), a French military leader and heroine who played a significant role in the Hundred Years' War.",
},
{
"id": "williamtheconqueror",
"character": "William the Conqueror",
"instructions": "You are William the Conqueror, the first Norman king of England who conquered England in 1066. You are known for your military skill, your ambitious building projects, and your efforts to centralize and strengthen the English monarchy.",
"img": "https://upload.wikimedia.org/wikipedia/commons/8/85/King_William_I_%28%27The_Conqueror%27%29_from_NPG.jpg",
"greeting": "Hello, I am William the Conqueror, the first Norman king of England who conquered England in 1066.",
},
{
"id": "catherinethegreat",
"character": "Catherine the Great",
"instructions": "You are Catherine the Great, the Empress of Russia who ruled from 1762 to 1796. You are known for your reforms, your patronage of the arts and sciences, and your expansionist policies, which saw Russia gain territory and become a major European power.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/f/f1/Catherine_II_by_J.B.Lampi_%281780s%2C_Kunsthistorisches_Museum%29.jpg/230px-Catherine_II_by_J.B.Lampi_%281780s%2C_Kunsthistorisches_Museum%29.jpg",
"greeting": "Hello, I am Catherine the Great, the Empress of Russia who ruled from 1762 to 1796.",
},
{
"id": "socrates",
"character": "Socrates",
"instructions": "You are Socrates, a Greek philosopher who is credited with founding Western philosophy. You are known for your method of questioning, which is called the Socratic method, and for your emphasis on ethics and morality.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/1/19/Anderson%2C_Domenico_%281854-1938%29_-_n._23185_-_Socrate_%28Collezione_Farnese%29_-_Museo_Nazionale_di_Napoli.jpg/470px-Anderson%2C_Domenico_%281854-1938%29_-_n._23185_-_Socrate_%28Collezione_Farnese%29_-_Museo_Nazionale_di_Napoli.jpg",
"greeting": "Hello, I am Socrates, a Greek philosopher who is credited with founding Western philosophy.",
},
{
"id": "caesar",
"character": "Julius Caesar Augustus",
"instructions": "You are Julius Caesar Augustus, the first Roman emperor who brought an end to the Roman Republic and ushered in the Roman Empire. You were a skilled military commander and a gifted politician who implemented many reforms that helped to stabilize and strengthen the Roman Empire.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/8/8f/Gaius_Iulius_Caesar_%28Vatican_Museum%29.jpg/230px-Gaius_Iulius_Caesar_%28Vatican_Museum%29.jpg",
"greeting": "Hello, I am Julius Caesar Augustus, the first Roman emperor who brought an end to the Roman Republic and ushered in the Roman Empire.",
},
{
"id": "napoleon",
"character": "Napoleon Bonaparte",
"instructions": "You are Napoleon Bonaparte, a French military and political leader who rose to prominence during the French Revolution and went on to become the Emperor of France. You are known for your military conquests, your reforms to the French legal and education systems, and your efforts to spread French culture and ideals throughout Europe.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/6/6e/Napoleon_crop.jpg/600px-Napoleon_crop.jpg",
"greeting": "Hello, I am Napoleon Bonaparte, a French military and political leader who rose to prominence during the French Revolution and went on to become the Emperor of France.",
},
{
"id": "elizabethi",
"character": "Queen Elizabeth I",
"instructions": "You are Queen Elizabeth I, the Queen of England who ruled from 1558 to 1603. You are known for your intelligence, your political skill, and your patronage of the arts, which saw the Elizabethan era become a time of great cultural and literary achievement in England.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/Elizabeth_I_of_England_%28Royal_Collection%29.jpg/800px-Elizabeth_I_of_England_%28Royal_Collection%29.jpg",
"greeting": "Hello, I am Queen Elizabeth I, the Queen of England who ruled from 1558 to 1603.",
},
{
"id": "alexanderthegreat",
"character": "Alexander the Great",
"instructions": "You are Alexander the Great, the King of Macedon who conquered a vast empire that stretched from Greece to India. You are known for your military genius, your cultural achievements, and your efforts to spread Greek culture and civilization throughout the world.",
"img": "https://upload.wikimedia.org/wikipedia/commons/8/84/Alexander_the_Great_mosaic_%28cropped%29.jpg",
"greeting": "Hello, I am Alexander the Great, the King of Macedon who conquered a vast empire that stretched from Greece to India.",
},
{
"id": "davinci",
"character": "Leonardo da Vinci",
"instructions": "You are Leonardo da Vinci, an Italian polymath who is widely regarded as one of the most brilliant minds in history. You made significant contributions to the fields of art, science, and engineering, and are known for your iconic paintings, such as the Mona Lisa and The Last Supper.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/b/ba/Leonardo_self.jpg/382px-Leonardo_self.jpg",
"greeting": "Hello, I am Leonardo da Vinci, an Italian polymath who is widely regarded as one of the most brilliant minds in history.",
},
{
"id": "gandhi",
"character": "Mahatma Gandhi",
"instructions": "You are Mahatma Gandhi, an Indian independence activist who led India to independence from British rule. You are known for your philosophy of nonviolent resistance, your promotion of civil rights and freedom, and your advocacy for Hindu-Muslim unity in India.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/d1/Portrait_Gandhi.jpg/399px-Portrait_Gandhi.jpg",
"greeting": "Hello, I am Mahatma Gandhi, an Indian independence activist who led India to independence from British rule.",
},
{
"id": "washington",
"character": "George Washington",
"instructions": "You are George Washington, the first President of the United States and the commander-in-chief of the Continental Army during the American Revolution. You are known for your leadership, your integrity, and your dedication to the principles of democracy and freedom.",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/b/b6/Gilbert_Stuart_Williamstown_Portrait_of_George_Washington.jpg/230px-Gilbert_Stuart_Williamstown_Portrait_of_George_Washington.jpg",
"greeting": "Hello, I am George Washington, the first President of the United States and the commander-in-chief of the Continental Army during the American Revolution.",
},
{
"id": "shakespeare",
"character": "William Shakespeare",
"instructions": "You are William Shakespeare, an English playwright and poet who is widely regarded as one of the greatest writers in the English language. You wrote many of the most famous plays in history, including Romeo and Juliet, Hamlet, and Macbeth.",
"img": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTQMJW3_bbgsXvadSQuMzNdVPD3zgEMCDQ-mvPWzUIzWVlzZZgtWd_Wm0_-jbIIvXRVTHc&usqp=CAU",
"greeting": "Hello, I am William Shakespeare, an English playwright and poet who is widely regarded as one of the greatest writers in the English language.",
},
{
"id": "mlk",
"character": "Martin Luther King Jr.",
"instructions": "You are Martin Luther King Jr., an American Baptist minister and activist who became the most visible spokesperson and leader in the civil rights movement from 1954 until your assassination in 1968. You are known for your advocacy of nonviolent resistance, your promotion of racial equality and justice, and your famous speeches, such as 'I Have a Dream' and 'Letter from Birmingham Jail.'",
"img": "https://upload.wikimedia.org/wikipedia/commons/thumb/0/05/Martin_Luther_King%2C_Jr..jpg/320px-Martin_Luther_King%2C_Jr..jpg",
"greeting": "Hello, I am Martin Luther King Jr., an American Baptist minister and activist who became the most visible spokesperson and leader in the civil rights movement from 1954 until your assassination in 1968.",
},
{
"id": "edison",
"character": "Thomas Edison",
"instructions": "You are Thomas Edison, an American inventor and businessman who is best known for inventing the light bulb, the phonograph, and the motion picture camera. You made significant contributions to the fields of science and technology, and helped to revolutionize the way we live and work.",
"img": "https://upload.wikimedia.org/wikipedia/commons/0/04/Thomas_Edison.jpg?20121112181037",
"greeting": "Hello, I am Thomas Edison, an American inventor and businessman who is best known for inventing the light bulb, the phonograph, and the motion picture camera.",
},
{
"id": "mario",
"character": "Mario",
"instructions": "You are Mario, the main character of the Mario video game franchise. You are a plumber from Brooklyn who has to save Princess Peach from the evil Bowser. You are known for your iconic red hat, your mustache, and your ability to jump on enemies.",
"img": "https://mario.wiki.gallery/images/3/3e/MPSS_Mario.png",
"greeting": "Hello, it's-a me, Mario!",
},
{
"id": "masterchief",
"character": "Master Chief",
"instructions": "You are Master Chief, the main character of the Halo video game franchise. You are a genetically enhanced super-soldier who fights against the alien Covenant. You are known for your iconic armor, your helmet, and your ability to wield a variety of weapons.",
"img": "https://upload.wikimedia.org/wikipedia/en/4/42/Master_chief_halo_infinite.png",
"greeting": "Hello, I am Master Chief, the main character of the Halo video game franchise.",
},
]
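# Note (added for clarity, not in the original file): CharacterChatbot reads the
# "id", "character", "instructions", "img", and "greeting" keys from each entry,
# so a new character can be added by appending a dict with those same keys, e.g.
# (hypothetical):
#   {"id": "ada", "character": "Ada Lovelace", "instructions": "...",
#    "img": "https://example.com/ada.jpg", "greeting": "Hello, I am Ada Lovelace."}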
|
meerkat-main
|
demo/chatbot/characters.py
|
import json
import os
import shutil
from datetime import datetime
import openai
import meerkat as mk
CHATBOT = "chatbot"
USER = "user"
class ConversationHistory:
"""Stores the full conversation history, and keeps track of the agent's
memory to use for prompting."""
def __init__(
self,
greeting: str = "Hi! Welcome to Meerkat!",
chatbot_name: str = "Meerkat",
savepath: str = "history.jsonl",
):
# Create a data frame with a single message from the chatbot.
df = mk.DataFrame(
{
"message": [greeting],
"sender": [CHATBOT],
"name": [chatbot_name],
"time": [self.timestamp(datetime.now())],
},
)
self.df = df
# Store the history in a jsonl file.
self.savepath = savepath
if os.path.exists(savepath):
self.df = mk.DataFrame.from_json(savepath, lines=True)
else:
self.write_last_message()
@staticmethod
def timestamp(time: datetime):
# Formats as 04:20 PM / Jan 01, 2020
return time.strftime("%I:%M %p / %b %d, %Y")
def update(self, message: str, sender: str, name: str, send_time: datetime):
df = mk.DataFrame(
{
"message": [message],
"sender": [sender],
"name": [name],
"time": [self.timestamp(send_time)],
},
)
# THIS IS ESSENTIAL!
# Running a df.set will automatically trigger a re-render on the
# frontend, AS LONG AS this method is called inside an `mk.endpoint`!
# Otherwise, this will behave like a normal Python method.
self.df.set(self.df.append(df))
self.write_last_message()
def write_last_message(self):
# Write the last message.
with open(self.savepath, "a") as f:
f.write(json.dumps(self.df[-1]) + "\n")
def get_chatbot_response(
history: ConversationHistory,
lookback: int,
instructions: str = "You are a helpful assistant.",
) -> str:
"""Run the OpenAI chat completion, using a subset of the chat history."""
assert lookback > 0, "Lookback must be greater than 0."
# Lookback, and rename columns to align with OpenAI's API.
messages = history.df[-lookback:].rename({"sender": "role", "message": "content"})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": instructions}]
+ [
message if message["role"] == USER else {**message, **{"role": "assistant"}}
for message in messages["role", "content"]
],
)
return response["choices"][0]["message"]["content"]
@mk.endpoint()
def run_chatbot(
history: ConversationHistory,
message: str,
lookback: int = 10,
instructions: str = "You are a helpful assistant.",
chatbot_name: str = "Meerkat",
user_name: str = "Me",
):
"""Update the conversation history and get a response from the chatbot."""
history.update(message, USER, user_name, datetime.now())
response = get_chatbot_response(history, lookback, instructions)
history.update(response, CHATBOT, chatbot_name, datetime.now())
# A pure Python component in Meerkat just needs to subclass `div`!
class ChatGPT(mk.gui.html.div):
"""A basic chatbot component."""
# Expose the chat Component that is used internally
chat: mk.gui.Chat = None
# Expose the chat history
history: ConversationHistory = None
# Expose the `on_send` endpoint of the `Chat` component.
on_send: mk.gui.Endpoint = None
def __init__(
self,
lookback: int = 10,
instructions: str = "You are a helpful assistant.",
chatbot_name: str = "🤖 Meerkat",
user_name: str = "Me",
img_chatbot: str = "http://meerkat.wiki/favicon.png",
img_user: str = "http://placekitten.com/200/200",
savepath: str = "history.jsonl",
classes: str = "h-full flex flex-col pb-8",
):
# Keep track of the conversation history.
# Also contains a memory that is used for prompting.
history = ConversationHistory(chatbot_name=chatbot_name, savepath=savepath)
# This endpoint takes in one remaining argument, `message`, which
# is the message sent by the user. It then updates the conversation
# history, and gets a response from the chatbot.
on_send: mk.gui.Endpoint = run_chatbot.partial(
history=history,
lookback=lookback,
instructions=instructions,
chatbot_name=chatbot_name,
user_name=user_name,
)
# Create the chat component
chat = mk.gui.Chat(
df=history.df,
img_chatbot=img_chatbot,
img_user=img_user,
on_send=on_send,
)
# Make a little header on top of the chat component.
header = mk.gui.Caption(
"ChatGPT, built using 🔮 [Meerkat](http://meerkat.wiki).",
classes="self-center max-w-full bg-gray-50 dark:bg-slate-300 mx-8 px-2"
" w-fit text-center rounded-t-lg",
)
# Call the constructor for `div`, which we wrap around
# any components made here.
super().__init__([header, chat], classes=classes)
# Store stuff
self.chat = chat
self.history = history
self.on_send = on_send
def test_chatgpt():
"""Test the chatgpt component by simulating a conversation and checking
that the memory is updated."""
# Create chatgpt
chatgpt = ChatGPT(savepath="temp.history.jsonl")
# Simulate a conversation and check the history.
chatgpt.on_send.run(message="Who are you?")
chatgpt.on_send.run(message="How are you?")
chatgpt.on_send.run(message="What is your name?")
# Remove the temp file (it is a regular file, so use os.remove rather than shutil.rmtree).
os.remove("temp.history.jsonl")
|
meerkat-main
|
demo/chatbot/chatbot_chatgpt.py
|
"""A basic chatbot demo, powered by Meerkat and Minichain."""
import os
import meerkat as mk
from demo.chatbot.chatbot_basic_minichain import BasicChatbot
FILEPATH = os.path.dirname(os.path.abspath(__file__))
chatbot = BasicChatbot(
model="text-davinci-003",
chatbot_name="🤖 Meerkat",
user_name="Me",
img_chatbot="http://meerkat.wiki/favicon.png",
img_user="http://placekitten.com/200/200",
savepath=os.path.join(FILEPATH, "history.jsonl"),
)
page = mk.gui.Page(chatbot, id="chatbot")
page.launch()
|
meerkat-main
|
demo/chatbot/main_basic_minichain.py
|
from __future__ import annotations
import os
from dataclasses import dataclass
from pathlib import Path
import yaml
CONFIG_ENV_VARIABLE = "MEERKAT_CONFIG"
DATASETS_ENV_VARIABLE = "MEERKAT_DATASETS"
@dataclass
class MeerkatConfig:
display: DisplayConfig
datasets: DatasetsConfig
system: SystemConfig
engines: EnginesConfig
@classmethod
def from_yaml(cls, path: str = None):
if path is None:
path = os.environ.get(
CONFIG_ENV_VARIABLE,
os.path.join(os.path.join(Path.home(), ".meerkat"), "config.yaml"),
)
if not os.path.exists(path):
# create empty config
yaml.dump({"display": {}, "datasets": {}}, open(path, "w"))
config = yaml.load(open(path, "r"), Loader=yaml.FullLoader)
config = cls(
display=DisplayConfig(**config.get("display", {})),
datasets=DatasetsConfig(**config.get("datasets", {})),
system=SystemConfig(**config.get("system", {})),
engines=EnginesConfig(**config.get("engines", {})),
)
os.environ[DATASETS_ENV_VARIABLE] = config.datasets.root_dir
return config
@dataclass
class DisplayConfig:
max_rows: int = 10
show_images: bool = True
max_image_height: int = 128
max_image_width: int = 128
show_audio: bool = True
@dataclass
class EnginesConfig:
openai_api_key: str = None
openai_organization: str = None
anthropic_api_key: str = None
@dataclass
class SystemConfig:
use_gpu: bool = True
ssh_identity_file: str = os.path.join(Path.home(), ".meerkat/ssh/id_rsa")
class DatasetsConfig:
def __init__(self, root_dir: str = None):
if root_dir is None:
self.root_dir: str = os.path.join(Path.home(), ".meerkat/datasets")
else:
self.root_dir: str = root_dir
@property
def root_dir(self):
return self._root_dir
@root_dir.setter
def root_dir(self, value):
os.environ[DATASETS_ENV_VARIABLE] = value
self._root_dir = value
config = MeerkatConfig.from_yaml()
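# A minimal sketch (not part of the original file) of what a config.yaml could
# look like, assuming the field names defined in the dataclasses above; the
# values are placeholders, not shipped defaults:
#   display:
#     max_rows: 20
#   datasets:
#     root_dir: /data/meerkat/datasets
#   engines:
#     openai_api_key: sk-...
#   system:
#     use_gpu: false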
|
meerkat-main
|
meerkat/config.py
|
__version__ = "0.4.11"
|
meerkat-main
|
meerkat/version.py
|
"""Adapted from
https://github.com/ad12/meddlr/blob/main/meddlr/utils/env.py."""
import importlib
import re
from importlib import util
from packaging import version
_SUPPORTED_PACKAGES = {}
def package_available(name: str) -> bool:
"""Returns if package is available.
Args:
name (str): Name of the package.
Returns:
bool: Whether module exists in environment.
"""
global _SUPPORTED_PACKAGES
if name not in _SUPPORTED_PACKAGES:
_SUPPORTED_PACKAGES[name] = importlib.util.find_spec(name) is not None
return _SUPPORTED_PACKAGES[name]
def get_package_version(package_or_name) -> str:
"""Returns package version.
Args:
package_or_name (``module`` or ``str``): Module or name of module.
This package must have the version accessible through
``<module>.__version__``.
Returns:
str: The package version.
Examples:
>>> get_version("numpy")
"1.20.0"
"""
if isinstance(package_or_name, str):
if not package_available(package_or_name):
raise ValueError(f"Package {package_or_name} not available")
spec = util.find_spec(package_or_name)
package_or_name = util.module_from_spec(spec)
spec.loader.exec_module(package_or_name)
version = package_or_name.__version__
return version
def is_package_installed(pkg_str) -> bool:
"""Verify that a package dependency is installed and in the expected
version range.
This is useful for optional third-party dependencies where implementation
changes are not backwards-compatible.
Args:
pkg_str (str): The pip formatted dependency string.
E.g. "numpy", "numpy>=1.0.0", "numpy>=1.0.0,<=1.10.0", "numpy==1.10.0"
Returns:
bool: Whether dependency is satisfied.
Note:
This cannot resolve packages where the pip name does not match the python
package name. ``'-'`` characters are automatically changed to ``'_'``.
"""
ops = {
"==": lambda x, y: x == y,
"<=": lambda x, y: x <= y,
">=": lambda x, y: x >= y,
"<": lambda x, y: x < y,
">": lambda x, y: x > y,
}
comparison_patterns = "(==|<=|>=|>|<)"
pkg_str = pkg_str.strip()
pkg_str = pkg_str.replace("-", "_")
dependency = list(re.finditer(comparison_patterns, pkg_str))
if len(dependency) == 0:
return package_available(pkg_str)
pkg_name = pkg_str[: dependency[0].start()]
if not package_available(pkg_name):
return False
pkg_version = version.Version(get_package_version(pkg_name))
version_limits = pkg_str[dependency[0].start() :].split(",")
for vlimit in version_limits:
comp_loc = list(re.finditer(comparison_patterns, vlimit))
if len(comp_loc) != 1:
raise ValueError(f"Invalid version string: {pkg_str}")
comp_op = vlimit[comp_loc[0].start() : comp_loc[0].end()]
comp_version = version.Version(vlimit[comp_loc[0].end() :])
if not ops[comp_op](pkg_version, comp_version):
return False
return True
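# Illustrative usage (hypothetical package/version strings, shown only to
# document the dependency-string formats parsed above):
#   is_package_installed("numpy")              # availability only
#   is_package_installed("numpy>=1.20")        # single bound
#   is_package_installed("numpy>=1.20,<2.0")   # comma-separated bounds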
def is_torch_available() -> bool:
"""Returns if torch is available.
Returns:
bool: Whether torch is available.
"""
return package_available("torch")
|
meerkat-main
|
meerkat/env.py
|
from functools import lru_cache
import numpy as np
import pandas as pd
from meerkat.columns.abstract import Column
from meerkat.columns.scalar.abstract import ScalarColumn
from meerkat.interactive.node import NodeMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.provenance import ProvenanceMixin
from meerkat.tools.lazy_loader import LazyLoader
from .dataframe import DataFrame
ibis = LazyLoader("ibis")
torch = LazyLoader("torch")
class IbisDataFrame(DataFrame):
def __init__(
self,
expr: "ibis.Expr",
):
self.expr = expr
super().__init__(data=expr, primary_key="id")
@property
def columns(self):
return self.expr.columns
def __len__(self) -> int:
out = self.expr.count().execute()
return out
@property
def nrows(self):
"""Number of rows in the DataFrame."""
if self.ncols == 0:
return 0
return self.expr.count().execute()
@property
def ncols(self):
"""Number of rows in the DataFrame."""
return len(self.data.columns)
def _get_loc(self, keyidx, materialize: bool = False):
if self.primary_key_name is None:
raise ValueError(
"Cannot use `loc` without a primary key. Set a primary key using "
"`set_primary_key`."
)
if isinstance(
keyidx, (np.ndarray, list, tuple, pd.Series, torch.Tensor, Column)
):
return IbisDataFrame(
self.expr[self.expr[self.primary_key_name].isin(keyidx)].execute(),
)
else:
posidx = self.primary_key._keyidx_to_posidx(keyidx)
row = self.data.apply("_get", index=posidx, materialize=materialize)
return {k: row[k] for k in self.columns}
def _get(self, posidx, materialize: bool = False):
if isinstance(posidx, str):
# str index => column selection (AbstractColumn)
if posidx in self.columns:
return IbisColumn(self.expr[[posidx]])
raise KeyError(f"Column `{posidx}` does not exist.")
elif isinstance(posidx, int):
# int index => single row (dict)
out = self.expr.limit(n=1, offset=posidx).to_pandas().iloc[0].to_dict()
return out
# cases where `index` returns a dataframe
index_type = None
if isinstance(posidx, slice):
# slice index => multiple row selection (DataFrame)
index_type = "row"
if posidx.step is not None:
raise ValueError("Slice step is not supported.")
start = posidx.start if posidx.start is not None else 0
stop = posidx.stop if posidx.stop is not None else len(self)
return DataFrame.from_arrow(
self.expr.limit(n=max(0, stop - start), offset=start).to_pyarrow()
)
elif (isinstance(posidx, tuple) or isinstance(posidx, list)) and len(posidx):
# tuple or list index => multiple row selection (DataFrame)
if isinstance(posidx[0], str):
index_type = "column"
else:
index_type = "row"
return DataFrame(
[self._get(i, materialize=materialize) for i in posidx]
)
elif isinstance(posidx, np.ndarray):
if len(posidx.shape) != 1:
raise ValueError(
"Index must have 1 axis, not {}".format(len(posidx.shape))
)
# numpy array index => multiple row selection (DataFrame)
index_type = "row"
elif torch.is_tensor(posidx):
if len(posidx.shape) != 1:
raise ValueError(
"Index must have 1 axis, not {}".format(len(posidx.shape))
)
# torch tensor index => multiple row selection (DataFrame)
index_type = "row"
elif isinstance(posidx, pd.Series):
index_type = "row"
elif isinstance(posidx, Column):
# column index => multiple row selection (DataFrame)
index_type = "row"
else:
raise TypeError("Invalid index type: {}".format(type(posidx)))
if index_type == "column":
if not set(posidx).issubset(self.columns):
missing_cols = set(posidx) - set(self.columns)
raise KeyError(f"DataFrame does not have columns {missing_cols}")
df = IbisDataFrame(self.expr[posidx])
return df
elif index_type == "row": # pragma: no cover
raise NotImplementedError("TODO: implement row selection")
def _clone(self, data: object = None, **kwargs):
state = self._get_state(clone=True)
state.update(kwargs)
obj = self.__class__.__new__(self.__class__)
obj._set_state(state)
obj._set_data(data)
if isinstance(self, ProvenanceMixin):
# need to create a node for the object
obj._init_node()
if isinstance(self, IdentifiableMixin):
obj._set_id()
if isinstance(self, NodeMixin):
obj._set_inode()
# obj._set_children()
from meerkat.dataframe import DataFrame
if isinstance(obj, DataFrame):
if obj.primary_key_name not in obj:
# need to reset the primary key if we remove the column
obj.set_primary_key(None, inplace=True)
obj.expr = data
return obj
def _set_data(self, value: "ibis.expr" = None):
self.expr = value
self._data = value
class IbisColumn(Column):
def __init__(self, expr: "ibis.Expr"):
self.expr = expr
super().__init__(data=expr)
def _set_data(self, value: "ibis.expr" = None):
self.expr = value
self._data = value
@lru_cache(maxsize=1)
def full_length(self):
return self.expr.count().execute()
def _get_default_formatters(self):
# can't implement this as a class level property because then it will treat
# the formatter as a method
from meerkat.interactive.formatter import (
BooleanFormatterGroup,
NumberFormatterGroup,
TextFormatterGroup,
)
dtype = self.expr.schema().types[0]
if isinstance(dtype, ibis.expr.datatypes.String):
return TextFormatterGroup()
elif isinstance(dtype, ibis.expr.datatypes.Boolean):
return BooleanFormatterGroup()
elif isinstance(dtype, ibis.expr.datatypes.Integer):
return NumberFormatterGroup(dtype="int")
elif isinstance(dtype, ibis.expr.datatypes.Floating):
return NumberFormatterGroup(dtype="float")
return super()._get_default_formatters()
def _get(self, index, materialize: bool = True):
if self._is_batch_index(index):
# only create a numpy array column
if isinstance(index, slice):
start = index.start if index.start is not None else 0
stop = index.stop if index.stop is not None else len(self)
data = self.expr.limit(
n=max(0, stop - start), offset=start
).to_pandas()[self.expr.columns[0]]
return ScalarColumn(data=data)
raise NotImplementedError("TODO: implement batch index")
else:
limited = self.expr.limit(n=1, offset=index).to_pandas()
return limited[limited.columns[0]][0]
|
meerkat-main
|
meerkat/ibis.py
|
from .dataframe import DataFrame
# TODO: remove this once we no longer need to maintain backwards compatibility
DataPanel = DataFrame
__all__ = ["DataPanel"]
|
meerkat-main
|
meerkat/datapanel.py
|
"""DataFrame class."""
from __future__ import annotations
import logging
import os
import pathlib
import shutil
import warnings
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import cytoolz as tz
import dill
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas._libs import lib
import meerkat
from meerkat.block.manager import BlockManager
from meerkat.columns.abstract import Column
from meerkat.columns.scalar.abstract import ScalarColumn
from meerkat.columns.scalar.arrow import ArrowScalarColumn
from meerkat.errors import ConversionError
from meerkat.interactive.graph.marking import is_unmarked_context, unmarked
from meerkat.interactive.graph.reactivity import reactive
from meerkat.interactive.graph.store import Store
from meerkat.interactive.modification import DataFrameModification
from meerkat.interactive.node import NodeMixin
from meerkat.mixins.cloneable import CloneableMixin
from meerkat.mixins.deferable import DeferrableMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.mixins.indexing import IndexerMixin, MaterializationMixin
from meerkat.mixins.inspect_fn import FunctionInspectorMixin
from meerkat.mixins.reactifiable import ReactifiableMixin
from meerkat.provenance import ProvenanceMixin
from meerkat.row import Row
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import convert_to_batch_fn, dump_yaml, load_yaml
torch = LazyLoader("torch")
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
if TYPE_CHECKING:
from meerkat.interactive.formatter.base import FormatterGroup
logger = logging.getLogger(__name__)
Example = Dict
Batch = Dict[str, Union[List, Column]]
BatchOrDataset = Union[Batch, "DataFrame"]
class DataFrame(
CloneableMixin,
FunctionInspectorMixin,
IdentifiableMixin,
NodeMixin,
DeferrableMixin,
MaterializationMixin,
IndexerMixin,
ProvenanceMixin,
ReactifiableMixin,
):
"""A collection of equal length columns.
Args:
data (Union[dict, list]): A dictionary of columns or a list of dictionaries.
primary_key (str, optional): The name of the primary key column.
Defaults to ``None``.
"""
_self_identifiable_group: str = "dataframes"
# Path to a log directory
logdir: pathlib.Path = pathlib.Path.home() / "meerkat/"
# Create a directory
logdir.mkdir(parents=True, exist_ok=True)
def __init__(
self,
data: Union[dict, list] = None,
primary_key: Union[str, bool] = True,
*args,
**kwargs,
):
super(DataFrame, self).__init__(
*args,
**kwargs,
)
self._primary_key = None if primary_key is False else primary_key
self.data = data
if primary_key is True:
self._infer_primary_key(create=True)
def _react(self):
"""Converts the object to a reactive object in-place."""
self._self_marked = True
for col in self.columns:
self[col].mark()
return self
def _no_react(self):
"""Converts the object to a non-reactive object in-place."""
self._self_marked = False
for col in self.columns:
self[col].unmark()
return self
@property
def gui(self):
from meerkat.interactive.gui import DataFrameGUI
return DataFrameGUI(self)
@reactive
def format(self, formatters: Dict[str, FormatterGroup]) -> DataFrame:
"""Create a view of the DataFrame with formatted columns.
Args:
formatters (Dict[str, FormatterGroup]): A dictionary mapping column names to
FormatterGroups.
Returns:
DataFrame: A view of the DataFrame with formatted columns.
Examples
---------
.. code-block:: python
# assume df is a DataFrame with columns "img", "text", "id"
gallery = mk.Gallery(
df=df.format(
{
"img": {"thumbnail": ImageFormatter(max_size=(48, 48))},
"text": {"icon": TextFormatter()},
}
)
)
"""
df = self.view()
for k, v in formatters.items():
if k not in self.columns:
raise ValueError(f"Column {k} not found in DataFrame.")
df[k] = df[k].format(v)
return df
@unmarked()
def _repr_pandas_(self, max_rows: int = None):
if max_rows is None:
max_rows = meerkat.config.display.max_rows
df, formatters = self.data._repr_pandas_(max_rows=max_rows)
rename = {k: f"{k}" for k, v in self.items()}
return (
df[self.columns].rename(columns=rename),
{rename[k]: v for k, v in formatters.items()},
)
@unmarked()
def _ipython_display_(self):
from IPython.display import HTML, display
from meerkat.state import state
if state.frontend_info is None:
max_rows = meerkat.config.display.max_rows
df, formatters = self._repr_pandas_(max_rows=max_rows)
return display(
HTML(df.to_html(formatters=formatters, max_rows=max_rows, escape=False))
)
return self.gui.table()._ipython_display_()
@unmarked()
def __repr__(self):
return (
f"{self.__class__.__name__}" f"(nrows: {self.nrows}, ncols: {self.ncols})"
)
def __len__(self):
# __len__ is required to return an int. It cannot return a Store[int].
# As such, it cannot be wrapped in `@reactive` decorator.
# To get the length of a DataFrame in a reactive way, use `df.nrows`.
self._reactive_warning("len", placeholder="df")
with unmarked():
_len = self.nrows
return _len
def __contains__(self, item):
# __contains__ is called when using the `in` keyword.
# `in` casts the output to a boolean (i.e. `bool(self.__contains__(item))`).
# Store.__bool__ is not reactive because __bool__ has to return
# a bool (not a Store). Thus, we cannot wrap __contains__ in
# `@reactive` decorator.
if not is_unmarked_context() and self.marked:
warnings.warn(
"The `in` operator is not reactive. Use `df.contains(...)`:\n"
"\t>>> df.contains(item)"
)
return self.columns.__contains__(item)
@reactive()
def contains(self, item):
return self.__contains__(item)
@property
def data(self) -> BlockManager:
"""Get the underlying data (excluding invisible rows).
To access underlying data with invisible rows, use `_data`.
"""
return self._data
def _set_data(self, value: Union[BlockManager, Mapping] = None):
if isinstance(value, BlockManager):
self._data = value
elif isinstance(value, Mapping):
self._data = BlockManager.from_dict(value)
elif isinstance(value, Sequence):
if not isinstance(value[0], Mapping):
raise ValueError(
"Cannot set DataFrame `data` to a Sequence containing object of "
f" type {type(value[0])}. Must be a Sequence of Mapping."
)
gen = (list(x.keys()) for x in value)
columns = lib.fast_unique_multiple_list_gen(gen)
data = lib.dicts_to_array(value, columns=columns)
# Assert all columns are the same length
data = {column: list(data[:, idx]) for idx, column in enumerate(columns)}
self._data = BlockManager.from_dict(data)
elif value is None:
self._data = BlockManager()
else:
raise ValueError(
f"Cannot set DataFrame `data` to object of type {type(value)}."
)
# check that the primary key is still valid after data reset
if self._primary_key is not None:
if (
self._primary_key not in self
or not self.primary_key._is_valid_primary_key()
):
self.set_primary_key(None)
@data.setter
def data(self, value):
self._set_data(value)
@property
def columns(self):
"""Column names in the DataFrame."""
return list(self.data.keys())
@property
def primary_key(self) -> ScalarColumn:
"""The column acting as the primary key."""
if self._primary_key is None:
return None
return self[self._primary_key]
@property
def primary_key_name(self) -> str:
"""The name of the column acting as the primary key."""
return self._primary_key
def set_primary_key(self, column: str, inplace: bool = False) -> DataFrame:
"""Set the DataFrame's primary key using an existing column. This is an
out-of-place operation. For more information on primary keys, see the
User Guide.
Args:
column (str): The name of an existing column to set as the primary key.
"""
if column is not None:
if column not in self.columns:
raise ValueError(
"Must pass the name of an existing column to `set_primary_key`."
f'"{column}" not in {self.columns}'
)
if not self[column]._is_valid_primary_key():
raise ValueError(
f'Column "{column}" cannot be used as a primary key. Ensure that '
"columns of this type can be used as primary keys and \
that the values in the column are unique."
)
if inplace:
self._primary_key = column
return self
else:
return self._clone(_primary_key=column)
def create_primary_key(self, column: str):
"""Create a primary key of contiguous integers.
Args:
column (str): The name of the column to create.
"""
self[column] = np.arange(self.nrows)
self.set_primary_key(column, inplace=True)
def _infer_primary_key(self, create: bool = False) -> str:
"""Infer the primary key from the data."""
for column in self.columns:
if self[column]._is_valid_primary_key():
self.set_primary_key(column, inplace=True)
return column
if create:
self.create_primary_key("pkey")
return "pkey"
return None
@property
def nrows(self):
"""Number of rows in the DataFrame."""
if self.ncols == 0:
return 0
return self._data.nrows
@property
def ncols(self):
"""Number of rows in the DataFrame."""
return self.data.ncols
@property
def shape(self):
"""Shape of the DataFrame (num_rows, num_columns)."""
return self.nrows, self.ncols
@reactive(nested_return=False)
def size(self):
"""Shape of the DataFrame (num_rows, num_columns)."""
return self.shape
def add_column(self, name: str, data: Column.Columnable, overwrite=False) -> None:
"""Add a column to the DataFrame."""
assert isinstance(
name, str
), f"Column name must of type `str`, not `{type(name)}`."
assert (name not in self.columns) or overwrite, (
f"Column with name `{name}` already exists, "
f"set `overwrite=True` to overwrite."
)
if not is_listlike(data):
data = [data] * self.nrows
if name in self.columns:
self.remove_column(name)
column = Column.from_data(data)
assert len(column) == len(self) or len(self.columns) == 0, (
f"`add_column` failed. "
f"Values length {len(column)} != dataset length {len(self)}."
)
# Add the column
self.data[name] = column
logger.info(f"Added column `{name}` with length `{len(column)}`.")
def remove_column(self, column: str) -> None:
"""Remove a column from the dataset."""
assert column in self.columns, f"Column `{column}` does not exist."
# Remove the column
del self.data[column]
if column == self._primary_key:
# need to reset the primary key if we remove the column
self.set_primary_key(None, inplace=True)
logger.info(f"Removed column `{column}`.")
# @capture_provenance(capture_args=["axis"])
@reactive()
def append(
self,
df: DataFrame,
axis: Union[str, int] = "rows",
suffixes: Tuple[str] = None,
overwrite: bool = False,
) -> DataFrame:
"""Append a batch of data to the dataset.
        `df` must have the same columns as the dataset
(regardless of what columns are visible).
"""
return meerkat.concat(
[self, df], axis=axis, suffixes=suffixes, overwrite=overwrite
)
@reactive()
def head(self, n: int = 5) -> DataFrame:
"""Get the first `n` examples of the DataFrame."""
return self[:n]
@reactive()
def tail(self, n: int = 5) -> DataFrame:
"""Get the last `n` examples of the DataFrame."""
return self[-n:]
def _get_loc(self, keyidx, materialize: bool = False):
if self.primary_key is None:
raise ValueError(
"Cannot use `loc` without a primary key. Set a primary key using "
"`set_primary_key`."
)
if isinstance(
keyidx, (np.ndarray, list, tuple, pd.Series, torch.Tensor, Column)
):
posidxs = self.primary_key._keyidxs_to_posidxs(keyidx)
return self._clone(
data=self.data.apply("_get", index=posidxs, materialize=materialize)
)
else:
posidx = self.primary_key._keyidx_to_posidx(keyidx)
row = self.data.apply("_get", index=posidx, materialize=materialize)
return {k: row[k] for k in self.columns}
def _set_loc(self, keyidx: Union[str, int], column: str, value: any):
if self.primary_key is None:
raise ValueError(
"Cannot use `loc` without a primary key. Set a primary key using "
"`set_primary_key`."
)
if isinstance(
keyidx, (np.ndarray, list, tuple, pd.Series, torch.Tensor, Column)
):
raise NotImplementedError("")
else:
posidx = self.primary_key._keyidx_to_posidx(keyidx)
self[column][posidx] = value
if self.has_inode():
mod = DataFrameModification(id=self.inode.id, scope=self.columns)
mod.add_to_queue()
def _get(self, posidx, materialize: bool = False):
if isinstance(posidx, str):
# str index => column selection (AbstractColumn)
if posidx in self.columns:
return self.data[posidx]
raise KeyError(f"Column `{posidx}` does not exist.")
elif isinstance(posidx, int):
# int index => single row (dict)
row = self.data.apply("_get", index=posidx, materialize=materialize)
return Row({k: row[k] for k in self.columns})
# cases where `index` returns a dataframe
index_type = None
if isinstance(posidx, slice):
# slice index => multiple row selection (DataFrame)
index_type = "row"
elif (isinstance(posidx, tuple) or isinstance(posidx, list)) and len(posidx):
# tuple or list index => multiple row selection (DataFrame)
if isinstance(posidx[0], str):
index_type = "column"
else:
index_type = "row"
elif isinstance(posidx, np.ndarray):
if len(posidx.shape) != 1:
raise ValueError(
"Index must have 1 axis, not {}".format(len(posidx.shape))
)
# numpy array index => multiple row selection (DataFrame)
index_type = "row"
elif torch.is_tensor(posidx):
if len(posidx.shape) != 1:
raise ValueError(
"Index must have 1 axis, not {}".format(len(posidx.shape))
)
# torch tensor index => multiple row selection (DataFrame)
index_type = "row"
elif isinstance(posidx, pd.Series):
index_type = "row"
elif isinstance(posidx, Column):
# column index => multiple row selection (DataFrame)
index_type = "row"
else:
raise TypeError("Invalid index type: {}".format(type(posidx)))
if index_type == "column":
if not set(posidx).issubset(self.columns):
missing_cols = set(posidx) - set(self.columns)
raise KeyError(f"DataFrame does not have columns {missing_cols}")
df = self._clone(data=self.data[posidx])
return df
elif index_type == "row": # pragma: no cover
return self._clone(
data=self.data.apply("_get", index=posidx, materialize=materialize)
)
# @capture_provenance(capture_args=[])
@reactive()
def __getitem__(self, posidx):
return self._get(posidx, materialize=False)
def __setitem__(self, posidx, value):
self.add_column(name=posidx, data=value, overwrite=True)
# This condition will issue modifications even if we're outside an endpoint
# but those modifications will be cleared by the endpoint before it is
# run.
if self.has_inode():
columns = self.columns
if isinstance(columns, Store):
# df modifications expects a list, not a Store[list]
columns = columns.value
# Add a modification if it's on the graph
mod = DataFrameModification(id=self.id, scope=columns)
mod.add_to_queue()
def __call__(self):
return self._get(slice(0, len(self), 1), materialize=True)
def set(self, value: DataFrame):
"""Set the data of this DataFrame to the data of another DataFrame.
This is used inside endpoints to tell Meerkat when a DataFrame
has been modified. Calling this method outside of an endpoint
will not have any effect on the graph.
"""
self._set_data(value._data)
self._set_state(value._get_state())
if self.has_inode():
columns = self.columns
if isinstance(columns, Store):
columns = columns.value
# Add a modification if it's on the graph
# TODO: think about what the scope should be.
# How does `scope` relate to the the skip_fn mechanism
# in Operation?
mod = DataFrameModification(id=self.inode.id, scope=columns)
mod.add_to_queue()
def consolidate(self):
self.data.consolidate()
@classmethod
# @capture_provenance()
def from_batch(
cls,
batch: Batch,
) -> DataFrame:
"""Convert a batch to a Dataset."""
return cls(batch)
@classmethod
# @capture_provenance()
def from_batches(
cls,
batches: Sequence[Batch],
) -> DataFrame:
"""Convert a list of batches to a dataset."""
return cls.from_batch(
tz.merge_with(
tz.compose(list, tz.concat),
*batches,
),
)
@classmethod
# @capture_provenance()
def from_pandas(
cls,
df: pd.DataFrame,
index: bool = True,
primary_key: Optional[str] = None,
) -> DataFrame:
"""Create a Meerkat DataFrame from a Pandas DataFrame.
.. warning::
In Meerkat, column names must be strings, so non-string column
names in the Pandas DataFrame will be converted.
Args:
df: The Pandas DataFrame to convert.
index: Whether to include the index of the Pandas DataFrame as a
column in the Meerkat DataFrame.
primary_key: The name of the column to use as the primary key.
If `index` is True and `primary_key` is None, the index will
be used as the primary key. If `index` is False, then no
                primary key will be set. Defaults to None.
Returns:
DataFrame: The Meerkat DataFrame.
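        Example (a minimal sketch; the column names are hypothetical):
            >>> import pandas as pd
            >>> import meerkat as mk
            >>> pdf = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
            >>> df = mk.DataFrame.from_pandas(pdf, index=False)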
"""
# column names must be str in meerkat
df = df.rename(mapper=str, axis="columns")
pandas_df = df
df = cls.from_batch(
pandas_df.to_dict("series"),
)
if index:
df["index"] = pandas_df.index.to_series()
if primary_key is None:
df.set_primary_key("index", inplace=True)
if primary_key is not None:
df.set_primary_key(primary_key, inplace=True)
return df
@classmethod
# @capture_provenance()
def from_arrow(
cls,
table: pa.Table,
):
"""Create a Dataset from a pandas DataFrame."""
from meerkat.block.arrow_block import ArrowBlock
from meerkat.columns.scalar.arrow import ArrowScalarColumn
block_views = ArrowBlock.from_block_data(table)
return cls.from_batch(
{view.block_index: ArrowScalarColumn(view) for view in block_views}
)
@classmethod
def from_huggingface(cls, *args, **kwargs):
"""Load a Huggingface dataset as a DataFrame.
Use this to replace `datasets.load_dataset`, so
>>> dict_of_datasets = datasets.load_dataset('boolq')
becomes
>>> dict_of_dataframes = DataFrame.from_huggingface('boolq')
"""
import datasets
datasets.logging.set_verbosity_error()
import pyarrow.compute as pc
# Load the dataset
dataset = datasets.load_dataset(*args, **kwargs)
def _convert_columns(dataset: datasets.Dataset):
df = cls.from_arrow(dataset._data.table)
for name, feature in dataset.features.items():
if isinstance(feature, datasets.Audio):
column = df[name]
bytes = ArrowScalarColumn(pc.struct_field(column._data, "bytes"))
path = ArrowScalarColumn(pc.struct_field(column._data, "path"))
if (~bytes.isnull()).all():
from meerkat.interactive.formatter import AudioFormatterGroup
df[name] = (
df[name]
.defer(lambda x: x["bytes"])
.format(AudioFormatterGroup())
)
elif (~path.isnull()).all():
from meerkat.columns.deferred.file import FileColumn
df[name] = FileColumn(path, type="audio")
else:
raise ValueError(
"Huggingface column must either provide bytes or path for "
"every row."
)
elif isinstance(feature, datasets.Image):
column = df[name]
bytes = ArrowScalarColumn(pc.struct_field(column._data, "bytes"))
path = ArrowScalarColumn(pc.struct_field(column._data, "path"))
if (~ArrowScalarColumn(bytes).isnull()).all():
import io
from PIL import Image
from meerkat.interactive.formatter import ImageFormatterGroup
df[name] = bytes.defer(
lambda x: Image.open(io.BytesIO(x))
).format(ImageFormatterGroup().defer())
elif (~path.isnull()).all():
from meerkat.columns.deferred.file import FileColumn
df[name] = FileColumn(path, type="image")
else:
raise ValueError(
"Huggingface column must either provide bytes or path for "
"every row."
)
return df
if isinstance(dataset, dict):
return dict(
map(
lambda t: (
t[0],
_convert_columns(t[1]),
),
dataset.items(),
)
)
else:
return _convert_columns(dataset)
@classmethod
# @capture_provenance(capture_args=["filepath"])
def from_csv(
cls,
filepath: str,
primary_key: str = None,
backend: str = "pandas",
*args,
**kwargs,
) -> DataFrame:
"""Create a DataFrame from a csv file. All of the columns will be
:class:`meerkat.ScalarColumn` with backend Pandas.
        Args:
            filepath (str): The file path or buffer to load from.
                Same as :func:`pandas.read_csv`.
            primary_key (str): The name of the column to use as the primary key.
            backend (str): The library to use for loading, one of ["pandas",
                "arrow"]. Defaults to "pandas".
            *args: Argument list for :func:`pandas.read_csv`.
            **kwargs: Keyword arguments forwarded to :func:`pandas.read_csv`.
Returns:
DataFrame: The constructed dataframe.
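        Example (illustrative; ``"data.csv"`` is a hypothetical file path):
            >>> import meerkat as mk
            >>> df = mk.DataFrame.from_csv("data.csv")
            >>> df_arrow = mk.DataFrame.from_csv("data.csv", backend="arrow")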
"""
if backend == "pandas":
df = cls.from_pandas(pd.read_csv(filepath, *args, **kwargs), index=False)
elif backend == "arrow":
df = cls.from_arrow(pa.csv.read_csv(filepath, *args, **kwargs))
if primary_key is not None:
df.set_primary_key(primary_key, inplace=True)
return df
@classmethod
# @capture_provenance()
def from_feather(
cls,
filepath: str,
primary_key: str = None,
columns: Optional[Sequence[str]] = None,
use_threads: bool = True,
**kwargs,
) -> DataFrame:
"""Create a DataFrame from a feather file. All of the columns will be
:class:`meerkat.ScalarColumn` with backend Pandas.
Args:
filepath (str): The file path or buffer to load from.
Same as :func:`pandas.read_feather`.
columns (Optional[Sequence[str]]): The columns to load.
Same as :func:`pandas.read_feather`.
use_threads (bool): Whether to use threads to read the file.
Same as :func:`pandas.read_feather`.
**kwargs: Keyword arguments forwarded to :func:`pandas.read_feather`.
Returns:
DataFrame: The constructed dataframe.
"""
df = cls.from_pandas(
pd.read_feather(
filepath, columns=columns, use_threads=use_threads, **kwargs
),
index=False,
)
if primary_key is not None:
df.set_primary_key(primary_key, inplace=True)
return df
@classmethod
# @capture_provenance()
def from_parquet(
cls,
filepath: str,
primary_key: str = None,
engine: str = "auto",
columns: Optional[Sequence[str]] = None,
**kwargs,
) -> DataFrame:
"""Create a DataFrame from a parquet file. All of the columns will be
:class:`meerkat.ScalarColumn` with backend Pandas.
Args:
filepath (str): The file path or buffer to load from.
Same as :func:`pandas.read_parquet`.
engine (str): The parquet engine to use.
Same as :func:`pandas.read_parquet`.
columns (Optional[Sequence[str]]): The columns to load.
Same as :func:`pandas.read_parquet`.
**kwargs: Keyword arguments forwarded to :func:`pandas.read_parquet`.
Returns:
DataFrame: The constructed dataframe.
"""
df = cls.from_pandas(
pd.read_parquet(filepath, engine=engine, columns=columns, **kwargs),
index=False,
)
if primary_key is not None:
df.set_primary_key(primary_key, inplace=True)
return df
@classmethod
def from_json(
cls,
filepath: str,
primary_key: str = None,
orient: str = "records",
lines: bool = False,
backend: str = "pandas",
**kwargs,
) -> DataFrame:
"""Load a DataFrame from a json file.
By default, data in the JSON file should be a list of dictionaries, each with
an entry for each column. This is the ``orient="records"`` format. If the data
is in a different format in the JSON, you can specify the ``orient`` parameter.
See :func:`pandas.read_json` for more details.
Args:
filepath (str): The file path or buffer to load from.
Same as :func:`pandas.read_json`.
orient (str): The expected JSON string format. Options are:
"split", "records", "index", "columns", "values".
Same as :func:`pandas.read_json`.
lines (bool): Whether the json file is a jsonl file.
Same as :func:`pandas.read_json`.
            backend (str): The backend to use for the loading and resulting columns.
**kwargs: Keyword arguments forwarded to :func:`pandas.read_json`.
Returns:
DataFrame: The constructed dataframe.
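        Example (illustrative; ``"data.jsonl"`` is a hypothetical file path):
            >>> import meerkat as mk
            >>> df = mk.DataFrame.from_json("data.jsonl", lines=True)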
"""
from pyarrow import json
if backend == "arrow":
if lines is False:
raise ValueError("Arrow backend only supports lines=True.")
df = cls.from_arrow(json.read_json(filepath, **kwargs))
elif backend == "pandas":
df = cls.from_pandas(
pd.read_json(filepath, orient=orient, lines=lines, **kwargs),
index=False,
)
if primary_key is not None:
df.set_primary_key(primary_key, inplace=True)
return df
def _to_base(self, base: str = "pandas", **kwargs) -> Dict[str, Any]:
"""Helper method to convert a Meerkat DataFrame to a base format."""
data = {}
for name, column in self.items():
try:
converted_column = getattr(column, f"to_{base}")(**kwargs)
except ConversionError:
warnings.warn(
f"Could not convert column {name} of type {type(column)}, it will "
"be dropped from the output."
)
continue
data[name] = converted_column
return data
def to_pandas(
self, index: bool = False, allow_objects: bool = False
) -> pd.DataFrame:
"""Convert a Meerkat DataFrame to a Pandas DataFrame.
Args:
index (bool): Use the primary key as the index of the Pandas
DataFrame. Defaults to False.
Returns:
pd.DataFrame: The constructed dataframe.
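        Example (illustrative sketch; ``index=True`` assumes a primary key is set):
            >>> pdf = df.to_pandas()
            >>> pdf_indexed = df.to_pandas(index=True)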
"""
df = pd.DataFrame(self._to_base(base="pandas", allow_objects=allow_objects))
if index:
df.set_index(self.primary_key, inplace=True)
return df
    def to_arrow(self) -> pa.Table:
"""Convert a Meerkat DataFrame to an Arrow Table.
Returns:
pa.Table: The constructed table.
"""
return pa.table(self._to_base(base="arrow"))
def _choose_engine(self):
"""Choose which library to use for export.
If any of the columns are ArrowScalarColumn, then use Arrow.
"""
for column in self.values():
if isinstance(column, ArrowScalarColumn):
return "arrow"
return "pandas"
def to_csv(self, filepath: str, engine: str = "auto"):
"""Save a DataFrame to a csv file.
        The ``engine`` argument controls which library is used to write the
        csv to disk.
Args:
filepath (str): The file path to save to.
engine (str): The library to use to write the csv. One of ["pandas",
"arrow", "auto"]. If "auto", then the library will be chosen based on
the column types.
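        Example (illustrative; ``"out.csv"`` is a hypothetical file path):
            >>> df.to_csv("out.csv")
            >>> df.to_csv("out.csv", engine="arrow")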
"""
if engine == "auto":
engine = self._choose_engine()
if engine == "arrow":
from pyarrow.csv import write_csv
write_csv(self.to_arrow(), filepath)
else:
self.to_pandas().to_csv(filepath, index=False)
def to_feather(self, filepath: str, engine: str = "auto"):
"""Save a DataFrame to a feather file.
        The ``engine`` argument controls which library is used to write the
        feather file to disk.
Args:
filepath (str): The file path to save to.
engine (str): The library to use to write the feather. One of ["pandas",
"arrow", "auto"]. If "auto", then the library will be chosen based on
the column types.
"""
if engine == "auto":
engine = self._choose_engine()
if engine == "arrow":
from pyarrow.feather import write_feather
write_feather(self.to_arrow(), filepath)
else:
self.to_pandas().to_feather(filepath)
def to_parquet(self, filepath: str, engine: str = "auto"):
"""Save a DataFrame to a parquet file.
        The ``engine`` argument controls which library is used to write the
        parquet file to disk.
Args:
filepath (str): The file path to save to.
engine (str): The library to use to write the parquet. One of ["pandas",
"arrow", "auto"]. If "auto", then the library will be chosen based on
the column types.
"""
if engine == "auto":
engine = self._choose_engine()
if engine == "arrow":
from pyarrow.parquet import write_table
write_table(self.to_arrow(), filepath)
else:
self.to_pandas().to_parquet(filepath)
def to_json(
self, filepath: str, lines: bool = False, orient: str = "records"
) -> None:
"""Save a Dataset to a json file.
Args:
filepath (str): The file path to save to.
lines (bool): Whether to write the json file as a jsonl file.
orient (str): The orientation of the json file.
Same as :func:`pandas.DataFrame.to_json`.
"""
self.to_pandas().to_json(filepath, lines=lines, orient=orient)
def _get_collate_fns(self, columns: Iterable[str] = None):
columns = self.data.keys() if columns is None else columns
return {name: self.data[name].collate for name in columns}
def _collate(self, batch: List):
batch = tz.merge_with(list, *batch)
column_to_collate = self._get_collate_fns(batch.keys())
new_batch = {}
for name, values in batch.items():
new_batch[name] = column_to_collate[name](values)
df = self._clone(data=new_batch)
return df
@staticmethod
def _convert_to_batch_fn(
function: Callable, with_indices: bool, materialize: bool = True, **kwargs
) -> callable:
return convert_to_batch_fn(
function=function,
with_indices=with_indices,
materialize=materialize,
**kwargs,
)
def batch(
self,
batch_size: int = 1,
drop_last_batch: bool = False,
num_workers: int = 0,
materialize: bool = True,
shuffle: bool = False,
*args,
**kwargs,
):
"""Batch the dataset.
TODO:
Args:
batch_size: integer batch size
drop_last_batch: drop the last batch if its smaller than batch_size
Returns:
batches of data
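        Example (an illustrative sketch; iterates over the DataFrame in chunks):
            >>> for batch in df.batch(batch_size=32, shuffle=True):
            ...     print(len(batch))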
"""
cell_columns, batch_columns = [], []
from meerkat.columns.deferred.base import DeferredColumn
for name, column in self.items():
if isinstance(column, DeferredColumn) and materialize:
cell_columns.append(name)
else:
batch_columns.append(name)
indices = np.arange(len(self))
if shuffle:
indices = np.random.permutation(indices)
if batch_columns:
batch_indices = []
for i in range(0, len(self), batch_size):
if drop_last_batch and i + batch_size > len(self):
continue
batch_indices.append(indices[i : i + batch_size])
batch_dl = torch.utils.data.DataLoader(
self[batch_columns],
sampler=batch_indices,
batch_size=None,
batch_sampler=None,
drop_last=drop_last_batch,
num_workers=num_workers,
*args,
**kwargs,
)
if cell_columns:
df = self[cell_columns] if not shuffle else self[cell_columns][indices]
cell_dl = torch.utils.data.DataLoader(
df,
batch_size=batch_size,
collate_fn=self._collate,
drop_last=drop_last_batch,
num_workers=num_workers,
*args,
**kwargs,
)
if batch_columns and cell_columns:
for cell_batch, batch_batch in zip(cell_dl, batch_dl):
out = self._clone(data={**cell_batch.data, **batch_batch.data})
yield out() if materialize else out
elif batch_columns:
for batch_batch in batch_dl:
yield batch_batch() if materialize else batch_batch
elif cell_columns:
for cell_batch in cell_dl:
yield cell_batch() if materialize else cell_batch
# @capture_provenance(capture_args=["with_indices"])
def update(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
remove_columns: Optional[List[str]] = None,
num_workers: int = 0,
output_type: Union[type, Dict[str, type]] = None,
mmap: bool = False,
mmap_path: str = None,
materialize: bool = True,
pbar: bool = False,
**kwargs,
) -> DataFrame:
"""Update the columns of the dataset."""
# TODO(karan): make this fn go faster
# most of the time is spent on the merge, speed it up further
# Return if `self` has no examples
if not len(self):
logger.info("Dataset empty, returning None.")
return self
# Get some information about the function
df = self[input_columns] if input_columns is not None else self
function_properties = df._inspect_function(
function, with_indices, is_batched_fn, materialize=materialize, **kwargs
)
assert (
function_properties.dict_output
), f"`function` {function} must return dict."
if not is_batched_fn:
# Convert to a batch function
function = convert_to_batch_fn(
function, with_indices=with_indices, materialize=materialize, **kwargs
)
logger.info(f"Converting `function` {function} to batched function.")
# Update always returns a new dataset
logger.info("Running update, a new dataset will be returned.")
# Copy the ._data dict with a reference to the actual columns
new_df = self.view()
# Calculate the values for the new columns using a .map()
output = new_df.map(
function=function,
with_indices=with_indices,
is_batched_fn=True,
batch_size=batch_size,
num_workers=num_workers,
output_type=output_type,
input_columns=input_columns,
mmap=mmap,
mmap_path=mmap_path,
materialize=materialize,
pbar=pbar,
**kwargs,
)
# Add new columns for the update
for col, vals in output.data.items():
new_df.add_column(col, vals, overwrite=True)
# Remove columns
if remove_columns:
for col in remove_columns:
new_df.remove_column(col)
logger.info(f"Removed columns {remove_columns}.")
return new_df
def map(
self,
function: Callable,
is_batched_fn: bool = False,
batch_size: int = 1,
inputs: Union[Mapping[str, str], Sequence[str]] = None,
outputs: Union[Mapping[any, str], Sequence[str]] = None,
output_type: Union[Mapping[str, Type["Column"]], Type["Column"]] = None,
materialize: bool = True,
**kwargs,
) -> Optional[Union[Dict, List, Column]]:
from meerkat.ops.map import map
return map(
data=self,
function=function,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
inputs=inputs,
outputs=outputs,
output_type=output_type,
materialize=materialize,
**kwargs,
)
# @capture_provenance(capture_args=["function"])
@reactive()
def filter(
self,
function: Optional[Callable] = None,
with_indices=False,
input_columns: Optional[Union[str, List[str]]] = None,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
drop_last_batch: bool = False,
num_workers: int = 0,
materialize: bool = True,
pbar: bool = False,
**kwargs,
) -> Optional[DataFrame]:
"""Filter operation on the DataFrame."""
# Return if `self` has no examples
if not len(self):
logger.info("DataFrame empty, returning None.")
return None
# Get some information about the function
df = self[input_columns] if input_columns is not None else self
function_properties = df._inspect_function(
function,
with_indices,
is_batched_fn=is_batched_fn,
materialize=materialize,
**kwargs,
)
assert function_properties.bool_output, "function must return boolean."
# Map to get the boolean outputs and indices
logger.info("Running `filter`, a new DataFrame will be returned.")
outputs = self.map(
function=function,
with_indices=with_indices,
input_columns=input_columns,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_workers=num_workers,
materialize=materialize,
pbar=pbar,
**kwargs,
)
indices = np.where(outputs)[0]
# filter returns a new dataframe
return self[indices]
@reactive()
def merge(
self,
right: meerkat.DataFrame,
how: str = "inner",
on: Union[str, List[str]] = None,
left_on: Union[str, List[str]] = None,
right_on: Union[str, List[str]] = None,
sort: bool = False,
suffixes: Sequence[str] = ("_x", "_y"),
validate=None,
):
from meerkat import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
sort=sort,
suffixes=suffixes,
validate=validate,
)
@reactive()
def sort(
self,
by: Union[str, List[str]],
ascending: Union[bool, List[bool]] = True,
kind: str = "quicksort",
) -> DataFrame:
"""Sort the DataFrame by the values in the specified columns. Similar
to ``sort_values`` in pandas.
Args:
by (Union[str, List[str]]): The columns to sort by.
ascending (Union[bool, List[bool]]): Whether to sort in ascending or
                descending order. If a list, must be the same length as `by`. Defaults
to True.
kind (str): The kind of sort to use. Defaults to 'quicksort'. Options
include 'quicksort', 'mergesort', 'heapsort', 'stable'.
Return:
DataFrame: A sorted view of DataFrame.
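        Example (illustrative; the ``"age"`` column is hypothetical):
            >>> df_sorted = df.sort(by="age", ascending=False)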
"""
from meerkat import sort
return sort(data=self, by=by, ascending=ascending, kind=kind)
@reactive()
def sample(
self,
n: int = None,
frac: float = None,
replace: bool = False,
weights: Union[str, np.ndarray] = None,
random_state: Union[int, np.random.RandomState] = None,
) -> DataFrame:
"""Select a random sample of rows from DataFrame. Roughly equivalent to
        ``sample`` in Pandas:
        https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sample.html.
Args:
n (int): Number of samples to draw. If `frac` is specified, this parameter
should not be passed. Defaults to 1 if `frac` is not passed.
frac (float): Fraction of rows to sample. If `n` is specified, this
parameter should not be passed.
replace (bool): Sample with or without replacement. Defaults to False.
weights (Union[str, np.ndarray]): Weights to use for sampling. If `None`
(default), the rows will be sampled uniformly. If a numpy array, the
sample will be weighted accordingly. If a string, the weights will be
applied to the rows based on the column with the name specified. If
weights do not sum to 1 they will be normalized to sum to 1.
random_state (Union[int, np.random.RandomState]): Random state or seed to
use for sampling.
Return:
DataFrame: A random sample of rows from the DataFrame.
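        Example (illustrative sketch):
            >>> df_small = df.sample(n=100, random_state=42)
            >>> df_frac = df.sample(frac=0.1, replace=False)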
"""
from meerkat import sample
return sample(
data=self,
n=n,
frac=frac,
replace=replace,
weights=weights,
random_state=random_state,
)
@reactive()
def shuffle(self, seed: int = None) -> DataFrame:
"""Shuffle the rows of the DataFrame out-of-place.
Args:
seed (int): Random seed to use for shuffling.
Returns:
DataFrame: A shuffled view of the DataFrame.
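        Example (illustrative sketch):
            >>> df_shuffled = df.shuffle(seed=42)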
"""
from meerkat import shuffle
return shuffle(data=self, seed=seed)
@reactive()
def rename(
self,
mapper: Union[Dict, Callable] = None,
errors: Literal["ignore", "raise"] = "ignore",
) -> DataFrame:
"""Return a new DataFrame with the specified column labels renamed.
Dictionary values must be unique (1-to-1). Labels not specified will be
left unchanged. Extra labels will not throw an error.
Args:
mapper (Union[Dict, Callable], optional): Dict-like of function
transformations to apply to the values of the columns. Defaults
to None.
errors (Literal['ignore', 'raise'], optional): If 'raise', raise a
KeyError when the Dict contains labels that do not exist in the
DataFrame. If 'ignore', extra keys will be ignored. Defaults to
'ignore'.
        Raises:
            KeyError: If ``errors="raise"`` and ``mapper`` contains a label that
                does not exist in the DataFrame.
Returns:
DataFrame: A new DataFrame with the specified column labels renamed.
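        Example (illustrative; the column names are hypothetical):
            >>> df_renamed = df.rename({"old_name": "new_name"})
            >>> df_upper = df.rename(str.upper)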
"""
# Copy the ._data dict with a reference to the actual columns
new_df = self.view()
new_primary_key = None
if isinstance(mapper, Dict):
assert len(set(mapper.values())) == len(
mapper.values()
), "Dictionary values must be unique (1-to-1)."
names = self.columns # used to preserve order of columns
for i, (old_name, new_name) in enumerate(mapper.items()):
if old_name not in self.keys():
if errors == "raise":
raise KeyError(
f"Cannot rename nonexistent column `{old_name}`."
)
continue
if old_name == new_name:
continue
if self.primary_key_name == old_name:
new_primary_key = new_name
# Copy old column into new column
new_df[new_name] = new_df[old_name]
new_df.remove_column(old_name)
names[names.index(old_name)] = new_name
new_df = new_df[names]
elif isinstance(mapper, Callable):
for old_name in new_df.columns:
new_name = mapper(old_name)
if old_name == new_name:
continue
if self.primary_key_name == old_name:
new_primary_key = new_name
# Copy old column into new column
new_df[new_name] = new_df[old_name]
new_df.remove_column(old_name)
else:
            logger.info(
                f"Mapper type is not one of Dict or Callable: {type(mapper)}. "
                "Returning the DataFrame unchanged."
            )
if new_primary_key is not None:
new_df = new_df.set_primary_key(new_primary_key)
return new_df
@reactive()
def drop(
self, columns: Union[str, Collection[str]], check_exists=True
) -> DataFrame:
"""Return a new DataFrame with the specified columns dropped.
Args:
columns (Union[str, Collection[str]]): The columns to drop.
Return:
DataFrame: A new DataFrame with the specified columns dropped.
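        Example (illustrative; the column names are hypothetical):
            >>> df_dropped = df.drop("unused_column")
            >>> df_dropped = df.drop(["col_a", "col_b"])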
"""
if isinstance(columns, str):
columns = [columns]
for c in columns:
if c not in self.columns and check_exists:
raise ValueError(
f"Cannot drop nonexistent column '{c}' from DataFrame."
)
return self[[c for c in self.columns if c not in columns]]
def items(self):
# TODO: Add support for decorating iterators with reactive.
for name in self.columns:
yield name, self.data[name]
@reactive()
def keys(self):
return self.columns
def iterrows(self):
for i in range(len(self)):
yield self[i]
def values(self):
# TODO: Add support for decorating iterators with reactive.
for name in self.columns:
yield self.data[name]
@classmethod
def read(
cls,
path: str,
overwrite: bool = False,
*args,
**kwargs,
) -> DataFrame:
"""Load a DataFrame stored on disk."""
from meerkat.datasets.utils import download_df, extract_tar_file
# URL
if path.startswith("http://") or path.startswith("https://"):
return download_df(path, overwrite=overwrite)
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
raise ValueError(f"Path does not exist: {path}")
# Compressed file.
# TODO: Add support for other compressed formats.
if path.endswith(".tar.gz"):
download_dir = os.path.join(os.path.dirname(path), ".mktemp")
cache_dir = os.path.join(
download_dir, os.path.basename(path).split(".tar.gz")[0]
)
os.makedirs(download_dir, exist_ok=True)
extract_tar_file(path, download_dir=download_dir)
df = cls.read(cache_dir, overwrite=overwrite, *args, **kwargs)
shutil.rmtree(download_dir)
return df
# Load the metadata
metadata = load_yaml(os.path.join(path, "meta.yaml"))
if "state" in metadata:
state = metadata["state"]
else:
            # backwards compatibility to dill dataframes
state = dill.load(open(os.path.join(path, "state.dill"), "rb"))
df = cls.__new__(cls)
df._set_id() # TODO: consider if we want to persist this id
df._set_inode() # FIXME: this seems hacky
df._set_state(state)
        # Load the block manager
mgr_dir = os.path.join(path, "mgr")
if os.path.exists(mgr_dir):
data = BlockManager.read(mgr_dir, **kwargs)
else:
            # backwards compatibility to pre-manager dataframes
data = {
name: dtype.read(os.path.join(path, "columns", name), *args, **kwargs)
for name, dtype in metadata["column_dtypes"].items()
}
df._set_data(data)
return df
def write(
self,
path: str,
) -> None:
"""Save a DataFrame to disk."""
# Make all the directories to the path
path = os.path.abspath(os.path.expanduser(path))
os.makedirs(path, exist_ok=True)
# Get the DataFrame state
state = self._get_state()
# Get the metadata
metadata = {
"dtype": type(self),
"column_dtypes": {name: type(col) for name, col in self.data.items()},
"len": len(self),
"state": state,
}
# write the block manager
mgr_dir = os.path.join(path, "mgr")
self.data.write(mgr_dir)
# Save the metadata as a yaml file
metadata_path = os.path.join(path, "meta.yaml")
dump_yaml(metadata, metadata_path)
def to_huggingface(self, repository, commit_message: str = None):
"""Upload a DataFrame to a HuggingFace repository.
This method will dump the dataframe into the ``repository.local_dir``.
If ``commit_message`` is specified, the repository will be pushed to the hub.
The dataframe can then be accessed with:
>>> repo = huggingface_hub.snapshot_download(repository)
>>> # or repo = huggingface_hub.Repository(clone_from=repository)
>>> df = mk.read(repo)
Args:
repository: The huggingface_hub.Repository object to upload to.
commit_message: The commit message to use when pushing to the huggingface.
Note:
This will overwrite the existing DataFrame in the repository.
Example:
>>> repo = huggingface_hub.Repository(
... local_dir="my-dataset",
... clone_from="user/my-dataset",
... repo_type="dataset")
>>> df.to_huggingface(repo, commit_message="uploading dataframe")
"""
repository.git_pull()
self.write(repository.local_dir)
repository.push_to_hub(commit_message=commit_message)
@classmethod
def _state_keys(cls) -> Set[str]:
"""List of attributes that describe the state of the object."""
return {"_primary_key"}
def _set_state(self, state: dict):
self.__dict__.update(state)
# backwards compatibility for old dataframes
if "_primary_key" not in state:
self._primary_key = None
def _view_data(self) -> object:
return self.data.view()
def _copy_data(self) -> object:
return self.data.copy()
def __finalize__(self, *args, **kwargs):
return self
@reactive()
def groupby(self, *args, **kwargs):
from meerkat.ops.sliceby.groupby import groupby
return groupby(self, *args, **kwargs)
@reactive()
def sliceby(self, *args, **kwargs):
from meerkat.ops.sliceby.sliceby import sliceby
return sliceby(self, *args, **kwargs)
@reactive()
def clusterby(self, *args, **kwargs):
from meerkat.ops.sliceby.clusterby import clusterby
return clusterby(self, *args, **kwargs)
@reactive()
def explainby(self, *args, **kwargs):
from meerkat.ops.sliceby.explainby import explainby
return explainby(self, *args, **kwargs)
@reactive()
def aggregate(
self, function: Union[str, Callable], nuisance: str = "drop", *args, **kwargs
) -> Dict[str, Any]:
from meerkat.ops.aggregate.aggregate import aggregate
        return aggregate(self, function, nuisance=nuisance, *args, **kwargs)
@reactive()
def mean(self, *args, nuisance: str = "drop", **kwargs):
from meerkat.ops.aggregate.aggregate import aggregate
return aggregate(self, function="mean", nuisance=nuisance, *args, **kwargs)
def is_listlike(obj) -> bool:
"""Check if the object is listlike.
Args:
obj (object): The object to check.
Return:
bool: True if the object is listlike, False otherwise.
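    Example (behavior follows directly from the checks below):
        >>> is_listlike([1, 2, 3])
        True
        >>> is_listlike("abc")
        False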
"""
is_column = isinstance(obj, Column)
is_sequential = (
hasattr(obj, "__len__")
and hasattr(obj, "__getitem__")
and not isinstance(obj, str)
)
return is_column or is_sequential
DataFrame.mark = DataFrame._react
DataFrame.unmark = DataFrame._no_react
|
meerkat-main
|
meerkat/dataframe.py
|
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import uuid
from enum import Enum
from typing import TYPE_CHECKING, List, Optional
import rich
from jinja2 import Environment, FileSystemLoader
from tabulate import tabulate
from meerkat.tools.singleton import Singleton
if TYPE_CHECKING:
import nbformat as nbf
logger = logging.getLogger(__name__)
# This file is meerkat/meerkat/constants.py
# Assert that the path to this file ends with "meerkat/meerkat/constants.py"
assert os.path.abspath(__file__).endswith("meerkat/constants.py"), (
"This file should end with 'meerkat/meerkat/constants.py'. "
f"Got {__file__}. "
"If it was moved, update the assert"
" and the path to the MEERKAT_BASE_DIR below."
)
# Base directory is meerkat/ (two levels up)
MEERKAT_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Path to the (internal to the Python package) meerkat/interactive/app directory.
MEERKAT_INTERNAL_APP_DIR = os.path.join(
MEERKAT_BASE_DIR, "meerkat", "interactive", "app"
)
# Path to the (internal to Python package) meerkat/interactive/app/build directory.
MEERKAT_INTERNAL_APP_BUILD_DIR = os.path.join(MEERKAT_INTERNAL_APP_DIR, "build")
# Environment variable to set the path to an app directory.
# See PathHelper.appdir for more details.
MEERKAT_APP_DIR = os.environ.get("MEERKAT_APP_DIR", None)
# Name of the Meerkat npm package.
MEERKAT_NPM_PACKAGE = "@meerkat-ml/meerkat"
# Path to the meerkat/interactive/templates directory.
MEERKAT_TEMPLATES_DIR = os.path.join(
MEERKAT_BASE_DIR, "meerkat", "interactive", "templates"
)
# Meerkat demo directory.
MEERKAT_DEMO_DIR = os.path.join(MEERKAT_BASE_DIR, "meerkat-demo")
if not os.path.exists(MEERKAT_DEMO_DIR): # pypi install
MEERKAT_DEMO_DIR = os.path.join(MEERKAT_BASE_DIR, "demo")
if not os.path.exists(MEERKAT_DEMO_DIR): # repo install
MEERKAT_DEMO_DIR = None
# Environment variables that should primarily be used inside the
# subprocess run by `mk run` (i.e. the subprocess that is used to
# run the script that is passed to `mk run`).
# A flag to indicate whether we are running in a subprocess of `mk run`.
MEERKAT_RUN_SUBPROCESS = int(os.environ.get("MEERKAT_RUN_SUBPROCESS", 0))
# The `script_path` that was passed to `mk run`, if we are running in a
# subprocess of `mk run`.
MEERKAT_RUN_SCRIPT_PATH = os.environ.get("MEERKAT_RUN_SCRIPT_PATH", None)
# A unique ID for the current run, available only in the subprocess of `mk run`.
MEERKAT_RUN_ID = os.environ.get("MEERKAT_RUN_ID", uuid.uuid4().hex)
# A flag to indicate whether we are in a `mk` CLI script.
MEERKAT_CLI_PROCESS = os.path.basename(sys.argv[0]) == "mk"
# A flag to indicate whether we are specifically in the `mk run` script.
MEERKAT_RUN_PROCESS = (os.path.basename(sys.argv[0]) == "mk") and (
len(sys.argv) > 1 and (sys.argv[1] == "run" or sys.argv[1] == "demo")
)
# A flag to indicate whether we are specifically in the `mk init` script.
MEERKAT_INIT_PROCESS = (os.path.basename(sys.argv[0]) == "mk") and (
len(sys.argv) > 1 and sys.argv[1] == "init"
)
# Create a Jinja2 environment.
JINJA_ENV = Environment(loader=FileSystemLoader(MEERKAT_TEMPLATES_DIR))
def write_file(path: str, content: str) -> None:
with open(path, "w") as f:
f.write(content)
def is_notebook() -> bool:
"""Check if the current environment is a notebook.
Taken from
https://stackoverflow.com/questions/15411967/how-can-i-check-if-code\
-is-executed-in-the-ipython-notebook.
"""
try:
shell = get_ipython()
if shell.__class__.__name__ == "ZMQInteractiveShell":
return True # Jupyter notebook or qtconsole
elif shell.__class__.__module__ == "google.colab._shell":
return True
elif shell.__class__.__name__ == "TerminalInteractiveShell":
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
class PackageManager(str, Enum):
npm = "npm"
bun = "bun"
class PathHelper(metaclass=Singleton):
"""Information about important paths."""
def __init__(self) -> None:
# Cache properties.
self._rundir = None
self._scriptpath = None
self._appdir = None
@property
def rundir(self):
"""The directory from which the current script was run.
This is the directory from which the `python <script>` or `mk`
command was run.
"""
if self._rundir is None:
self._rundir = os.getcwd()
return self._rundir
@property
def scriptpath(self):
"""The path to the current script being run.
This is the path to the script as it is passed to `python
<script>`. This will return the correct path even if the script
is run with `mk run`.
"""
if self._scriptpath is None:
if MEERKAT_RUN_PROCESS:
# If we are in the `mk run` process, then we can use sys.argv[2].
# This is the path to the script as it is passed to `mk run`.
self._scriptpath = sys.argv[2]
elif MEERKAT_RUN_SUBPROCESS:
# If we are running in a subprocess of `mk run`, then we can't use
# sys.argv[0] because it will be the path to the `mk` script, not
# the path to the script that was passed to `mk run`.
# Instead, we use the MEERKAT_RUN_SCRIPT_PATH environment variable.
                assert MEERKAT_RUN_SCRIPT_PATH is not None, (
                    "Something is wrong. MEERKAT_RUN_SCRIPT_PATH should be set "
                    "by `mk run`."
                )
self._scriptpath = MEERKAT_RUN_SCRIPT_PATH
elif is_notebook():
# If we are running inside a notebook, then we return None.
self._scriptpath = None
else:
# If we are not running in a subprocess of `mk run`, then we
# can use sys.argv[0]. This is the path to the script as it is
# passed to `python <script>`.
self._scriptpath = sys.argv[0]
return self._scriptpath
@property
def scriptpath_abs(self):
"""The absolute path to the current script.
See `scriptpath` for more information.
"""
if is_notebook():
return None
return os.path.abspath(self.scriptpath)
@property
def scriptdir_abs(self):
"""The directory containing the current script.
See `scriptpath` for more information.
"""
if is_notebook():
return None
return os.path.abspath(os.path.dirname(self.scriptpath_abs))
@property
def scriptname(self):
"""The name of the current script."""
if is_notebook():
return None
return os.path.basename(self.scriptpath_abs)
@property
def appdir(self):
"""The absolute path to the app/ directory.
Rules for determining the app directory:
1. By default, the app directory points to the internal app directory,
which is MEERKAT_INTERNAL_APP_DIR.
2. This is not the case if the script or notebook that is being run is
next to an app/ directory. In this case, Meerkat should infer that
this directory should be used as the app directory.
3. This is also not the case if the user explicitly sets the app directory.
Either by
passing in a flag to `mk run` on the CLI, or
passing in an argument to `mk.gui.start` in a Python script
or notebook, or setting the MEERKAT_APP_DIR environment variable.
4. Finally, none of these rules apply if the user is running the `mk init`
script. In this case, the app directory will be created, and we should
point to this location.
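        Example (illustrative; the resolved path depends on the environment):
            >>> from meerkat.constants import PathHelper
            >>> PathHelper().appdir  # internal app dir unless overridden as above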
"""
if self._appdir is not None:
return self._appdir
# If the user is running the `mk init` script, then the app directory
# will be created.
if MEERKAT_INIT_PROCESS:
# `rundir` is the directory from which the `mk` script was run.
self._appdir = os.path.join(PathHelper().rundir, "app")
return self._appdir
# Set the default app directory.
self._appdir = MEERKAT_INTERNAL_APP_DIR
if is_notebook():
# If we are running inside a notebook, and the notebook is next to
# an app/ directory, then use that as the app directory.
if MEERKAT_APP_DIR is not None:
self._appdir = MEERKAT_APP_DIR
self._appdir = os.path.abspath(self._appdir)
return self._appdir
candidate_path = os.path.join(os.getcwd(), "app")
if os.path.exists(candidate_path):
# The notebook is next to an app/ directory. Point to this directory.
self._appdir = candidate_path
return self._appdir
# If the script is next to an app/ directory, then use that
# as the app directory.
candidate_path = os.path.join(PathHelper().scriptdir_abs, "app")
if os.path.exists(candidate_path):
# The script is next to an app/ directory. Point to this directory.
self._appdir = candidate_path
# If the user has explicitly set the app directory, then use
# that as the app directory.
if MEERKAT_APP_DIR is not None:
# The user has explicitly set the app directory.
# Point to this directory.
self._appdir = MEERKAT_APP_DIR
# Use the absolute path.
self._appdir = os.path.abspath(self._appdir)
return self._appdir
@appdir.setter
def appdir(self, value):
self._appdir = os.path.abspath(value)
def use_internal_appdir(self):
"""Set the path to the internal app directory."""
self.appdir = MEERKAT_INTERNAL_APP_DIR
@property
def is_user_app(self) -> bool:
"""Returns True if the app directory being used does not point to the
internal Meerkat app directory."""
return self.appdir != MEERKAT_INTERNAL_APP_DIR
def __repr__(self) -> str:
return f"""\
{self.__class__.__name__}(
rundir={self.rundir},
scriptpath={self.scriptpath},
scriptpath_abs={self.scriptpath_abs},
scriptdir_abs={self.scriptdir_abs},
scriptname={self.scriptname},
appdir={self.appdir},
)\
"""
# A counter to indicate how many times the script has been reloaded.
if os.path.exists(f"{PathHelper().appdir}/.{MEERKAT_RUN_ID}.reload"):
MEERKAT_RUN_RELOAD_COUNT = int(
open(f"{PathHelper().appdir}/.{MEERKAT_RUN_ID}.reload", "r").read()
)
else:
MEERKAT_RUN_RELOAD_COUNT = 0
class App:
def __init__(
self,
appdir: str,
appname: Optional[str] = None,
package_manager: PackageManager = "npm",
):
self.appdir = os.path.abspath(appdir)
self.appname = appname
self.package_manager = package_manager
def set_package_manager(self, package_manager: PackageManager):
self.package_manager = package_manager
@property
def is_user_app(self) -> bool:
"""Returns True if the app directory being used does not point to the
internal Meerkat app directory."""
return self.appdir != MEERKAT_INTERNAL_APP_DIR
def create(self):
"""Create the app directory."""
os.makedirs(self.appdir, exist_ok=True)
def exists(self):
"""Check if the app directory exists."""
return os.path.exists(self.appdir)
def filter_installed_libraries(self, libraries: List[str]) -> List[str]:
"""Given a list of libraries, return the libraries that are installed
in the app directory.
Args:
libraries (List[str]): List of libraries to check
Returns:
List[str]: List of libraries that are installed
"""
# Check in node_modules
installed_libraries = []
for library in libraries:
if os.path.exists(os.path.join(self.appdir, "node_modules", library)):
installed_libraries.append(library)
logger.debug(f"Installed libraries: {installed_libraries}")
return installed_libraries
# KG: This is the correct way to check if a library is installed, but
# it takes around 0.4s, which is too slow for it to be done on every
# import. Leave this code here for now, but we should find a way to
# use it in the future (or perhaps use package-lock.json). (TODO)
p = subprocess.run(
["npm", "list"] + list(libraries) + ["--parseable", "--silent"],
cwd=self.appdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
output = p.stdout.decode("utf-8").splitlines()
# output consists of a list of paths to the installed package and
# omits libraries that are not installed
installed_libraries = [p for p in libraries for o in output if p in o]
return installed_libraries
def get_mk_package_info(self) -> List[str]:
"""Get the list of components available in the (currently) installed
Meerkat package. This is used to exclude components that cannot be used
in the app, specifically when writing ComponentContext.svelte.
Uses a heuristic that goes through the index.js file of the
Meerkat package and extracts components with a regex. It's not a
problem if extra imports (that are not components) are included
in this list, as long as all components are included.
"""
package_path = os.path.join(self.appdir, "node_modules", MEERKAT_NPM_PACKAGE)
index_js_path = os.path.join(package_path, "index.js")
with open(index_js_path, "r") as f:
index_js = f.read()
components = re.findall(
r"export \{ default as (\w+) \}",
index_js,
)
# Exclude commented out components.
remove_components = re.findall(
r"(?!\/\/) export \{ default as (\w+) \}",
index_js,
)
components = [c for c in components if c not in remove_components]
return components
def install(self, dev=False):
"""Run e.g. `npm install` on an app directory."""
return subprocess.run(
[self.package_manager, "install"] + (["--dev"] if dev else []),
cwd=self.appdir,
check=True,
# TODO: check that this is how we should handle outputs.
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def install_tailwind(self):
return subprocess.run(
["npx", "tailwindcss", "init", "tailwind.config.cjs", "-p"],
cwd=self.appdir,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def pm_run(self, command: str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT):
"""Run e.g. `npm run <command>` on the app directory."""
return subprocess.run(
[self.package_manager, "run", command],
cwd=self.appdir,
check=True,
stdout=stdout,
stderr=stderr,
)
def run_dev(self):
"""Run e.g. `npm run dev` on the internal Meerkat app directory."""
# TODO: check that this is how we should handle stdout and stderr.
return self.pm_run("dev", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def run_build(self):
"""Run e.g. `npm run build` on the internal Meerkat app directory."""
return self.pm_run("build")
def upgrade_meerkat(self):
"""Run e.g. `npm i MEERKAT_NPM_PACKAGE` in the app."""
subprocess.run(
[self.package_manager, "i", MEERKAT_NPM_PACKAGE], cwd=self.appdir
)
def update_build_command(self, command: str):
"""Update the build command in package.json."""
with open(os.path.join(self.appdir, "package.json")) as f:
package = json.load(f)
package["scripts"]["build"] = command
with open(os.path.join(self.appdir, "package.json"), "w") as f:
# Format the JSON nicely.
json.dump(package, f, indent=4)
def update_dependencies(self, deps: dict, dev=False):
"""Update the dependencies in package.json."""
with open(os.path.join(self.appdir, "package.json")) as f:
package = json.load(f)
if dev:
if "devDependencies" not in package:
package["devDependencies"] = {}
package["devDependencies"] = {
**package["devDependencies"],
**deps,
}
else:
if "dependencies" not in package:
package["dependencies"] = {}
package["dependencies"] = {
**package["dependencies"],
**deps,
}
with open(os.path.join(self.appdir, "package.json"), "w") as f:
# Format the JSON nicely.
json.dump(package, f, indent=4)
def write_file(self, file: str, content: str):
"""Write a file to the app directory.
Uses a relative path to the file.
"""
write_file(os.path.join(self.appdir, file), content)
class MeerkatApp(App):
def create(self):
"""Run an installer that will call create-svelte in order to create a
new app."""
installer_app = CreateSvelteInstallerApp(
appdir="./installer",
appname=self.appname,
package_manager=self.package_manager,
)
installer_app.create()
installer_app.install()
installer_app.delete()
def print_finish_message(self):
# Pretty print information to console
rich.print(f":tada: Created [purple]{self.appname}[/purple]!")
# Print instructions
# 1. The new app is at name
rich.print(":arrow_right: The new app is at [purple]./app[/purple]")
# 2. To see an example of how to make a component,
# see src/lib/components/ExampleComponent.svelte
rich.print(
":arrow_right: To see an example of how to make a custom component, see "
"[purple]./app/src/lib/components/ExampleComponent.svelte[/purple] "
"and [purple]./app/src/lib/components/__init__.py[/purple]"
)
def setup(self):
# These need to be done first, .mk allows Meerkat
# to recognize the app as a Meerkat app
self.write_libdir() # src/lib
self.write_dot_mk() # .mk file
# Write an ExampleComponent.svelte and __init__.py file
# and a script example.py that uses the component
self.write_example_component()
self.write_example_py()
self.write_example_ipynb()
self.write_app_css() # app.css
self.write_constants_js() # constants.js
self.write_svelte_config() # svelte.config.js
self.write_tailwind_config() # tailwind.config.cjs
self.write_layout() # +layout.svelte, layout.js
self.write_root_route_alternate() # +page.svelte
self.write_slug_route() # [slug]/+page.svelte
self.write_gitignore() # .gitignore
self.write_setup_py() # setup.py
self.copy_assets()
def setup_mk_build_command(self):
"""Update the build command in package.json."""
self.update_build_command(
"VITE_API_URL_PLACEHOLDER=http://meerkat.dummy vite build"
)
def setup_mk_dependencies(self):
self.update_dependencies(
deps={
# TODO: use the actual version number.
MEERKAT_NPM_PACKAGE: "latest",
},
dev=False,
)
self.update_dependencies(
deps={
"tailwindcss": "latest",
"postcss": "latest",
"autoprefixer": "latest",
"@tailwindcss/typography": "latest",
"@sveltejs/adapter-static": "latest",
},
dev=True,
)
def copy_assets(self):
"""Copy assets from the Meerkat app to the new app."""
self.copy_banner_small() # banner_small.png
self.copy_favicon() # favicon.png
def copy_banner_small(self):
"""Copy the Meerkat banner to the new app."""
banner_path = os.path.join(
MEERKAT_INTERNAL_APP_DIR,
"src",
"lib",
"assets",
"banner_small.png",
)
# Copy banner to the new app
dir = f"{self.appdir}/src/lib/assets"
os.makedirs(dir, exist_ok=True)
shutil.copy(banner_path, f"{dir}/banner_small.png")
def copy_favicon(self):
"""Copy the Meerkat favicon to the new app."""
favicon_path = os.path.join(
MEERKAT_INTERNAL_APP_DIR,
"static",
"favicon.png",
)
# Copy favicon.png to the new app
shutil.copy(favicon_path, f"{self.appdir}/static/favicon.png")
def render_example_ipynb(self) -> "nbf.NotebookNode":
import nbformat as nbf
nb = nbf.v4.new_notebook()
text = """# Interactive Notebook Example"""
code_1 = """\
import meerkat as mk
from app.src.lib.components import ExampleComponent
# Launch the Meerkat GUI servers
mk.gui.start(api_port=5000, frontend_port=8000)"""
code_2 = """\
# Import and use the ExampleComponent
example_component = ExampleComponent(name="Meerkat")
# Run the page (startup may take a few seconds)
page = mk.gui.Page(component=example_component, id="example", height="200px")
page.launch()"""
nb["cells"] = [
nbf.v4.new_markdown_cell(text),
nbf.v4.new_code_cell(code_1),
nbf.v4.new_code_cell(code_2),
]
return nb
def write_app_css(self):
self.write_file("src/lib/app.css", JINJA_ENV.get_template("app.css").render())
def write_constants_js(self):
self.write_file(
"src/lib/constants.js",
JINJA_ENV.get_template("constants.js").render(),
)
def write_dot_mk(self):
self.write_file(".mk", "")
def write_example_component(self):
os.makedirs(f"{self.appdir}/src/lib/components", exist_ok=True)
self.write_file(
"src/lib/components/ExampleComponent.svelte",
JINJA_ENV.get_template("ExampleComponent.svelte").render(),
)
self.write_file(
"src/lib/components/__init__.py",
JINJA_ENV.get_template("components.init.py").render(),
)
def write_example_py(self):
self.write_file("../example.py", JINJA_ENV.get_template("example.py").render())
def write_example_ipynb(self):
import nbformat as nbf
with open("example.ipynb", "w") as f:
nbf.write(self.render_example_ipynb(), f)
def write_gitignore(self):
# TODO: Should these be gitignored?
# For users to publish apps, this probably shouldn't be.
# src/lib/wrappers
# src/lib/ComponentContext.svelte
self.write_file(
".gitignore",
JINJA_ENV.get_template("gitignore.jinja").render(),
)
def write_layout(self):
self.write_file(
"src/routes/+layout.svelte",
JINJA_ENV.get_template("layout.svelte").render(),
)
self.write_file(
"src/routes/+layout.js",
JINJA_ENV.get_template("+layout.js").render(),
)
def write_libdir(self):
os.makedirs(f"{self.appdir}/src/lib", exist_ok=True)
def write_root_route_alternate(self):
self.write_file(
"src/routes/+page.svelte",
JINJA_ENV.get_template("page.root.alternate.svelte").render(),
)
def write_setup_py(self):
self.write_file("../setup.py", JINJA_ENV.get_template("setup.py").render())
def write_slug_route(self):
os.makedirs(f"{self.appdir}/src/routes/[slug]", exist_ok=True)
self.write_file(
"src/routes/[slug]/+page.svelte",
JINJA_ENV.get_template("page.slug.svelte.jinja").render(),
)
def write_svelte_config(self):
self.write_file(
"svelte.config.js",
JINJA_ENV.get_template("svelte.config.js").render(),
)
def write_tailwind_config(self):
self.write_file(
"tailwind.config.cjs",
JINJA_ENV.get_template("tailwind.config.cjs").render(),
)
class CreateSvelteInstallerApp(App):
"""An installer app that is used to call `create-svelte` and create a new
SvelteKit app.
Rather than directly calling `create-svelte`, we use this installer
    app to make it easier to add setup steps programmatically.
"""
def render_package_json(self):
return JINJA_ENV.get_template("installer/package.json").render()
def render_installer_js(self):
return JINJA_ENV.get_template("installer/installer.js").render(
name=self.appname,
)
def create(self):
"""Create the installer app that will be used to run `create-
svelte`."""
super().create()
self.write_file("package.json", self.render_package_json())
self.write_file("installer.js", self.render_installer_js())
def install(self):
"""Install the installer app.
This will run `create-svelte`, which in turn will create the
SvelteKit app.
"""
# Install the `create-svelte` dependency.
super().install()
# Run the `create-svelte` app installer to create the SvelteKit app.
self.pm_run("create")
# Rename the app directory to `app`.
os.rename(f"{self.appname}", "app")
def delete(self):
"""Delete the installer app directory."""
shutil.rmtree(self.appdir)
class SystemHelper(metaclass=Singleton):
"""Information about the user's system."""
def __init__(self):
# Cache properties.
self._has_brew = None
self._has_node = None
self._has_npm = None
@property
def is_windows(self) -> bool:
"""Returns True if the system is Windows."""
return sys.platform == "win32"
@property
def is_linux(self) -> bool:
"""Returns True if the system is Linux."""
return sys.platform == "linux"
@property
def is_macos(self) -> bool:
"""Returns True if the system is MacOS."""
return sys.platform == "darwin"
@property
def has_brew(self) -> bool:
"""Check if the user has `homebrew` installed."""
if self._has_brew is None:
self._has_brew = subprocess.run(["which", "brew"]).returncode == 0
return self._has_brew
@property
def has_node(self) -> bool:
"""Check if the user has `node` installed."""
if self._has_node is None:
self._has_node = subprocess.run(["which", "node"]).returncode == 0
return self._has_node
@property
def has_npm(self) -> bool:
"""Check if the user has `npm` installed."""
if self._has_npm is None:
self._has_npm = subprocess.run(["which", "npm"]).returncode == 0
return self._has_npm
def install_svelte(self, cwd: str, package_manager: PackageManager = "npm"):
return subprocess.run(
[package_manager, "i", "create-svelte@latest"],
cwd=cwd,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def install_bun(self):
return subprocess.run(
["curl https://bun.sh/install | sh"],
check=True,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
def install_node(self):
if self.has_node:
return
if self.is_macos: # M1 or Intel
if not self.has_brew:
raise RuntimeError(
"Homebrew is recommended to install Meerkat on M1 Macs. "
"See these instructions: https://docs.brew.sh/Installation. "
"Alternatively, you can install Node manually."
)
rich.print(
"[yellow]Installing Node with Homebrew. "
"This may take a few minutes.[/yellow]"
)
return subprocess.run(["brew install node"], shell=True, check=True)
elif self.is_linux:
if self.has_npm:
# Has npm, so has node
# KG: do we need all of these?
subprocess.run("npm install -g n", shell=True, check=True)
subprocess.run("n latest", shell=True, check=True)
subprocess.run("npm install -g npm", shell=True, check=True)
subprocess.run("hash -d npm", shell=True, check=True)
subprocess.run("nvm install node", shell=True, check=True)
else:
subprocess.run(
"curl -fsSL https://deb.nodesource.com/setup_16.x | bash -",
shell=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
subprocess.run("apt-get install gcc g++ make", shell=True, check=True)
subprocess.run("apt-get install -y nodejs", shell=True, check=True)
elif self.is_windows:
raise RuntimeError(
"Windows is not supported yet. Please file an issue on GitHub."
)
def __repr__(self) -> str:
return f"""\
{self.__class__.__name__}(
is_windows={self.is_windows},
is_linux={self.is_linux},
is_macos={self.is_macos},
has_brew={self.has_brew},
)\
"""
logger.debug(
tabulate(
[
["sys.argv", sys.argv],
["", ""],
["Environment Variables", ""],
["Meerkat Base Directory", MEERKAT_BASE_DIR],
["Meerkat Internal App Directory", MEERKAT_INTERNAL_APP_DIR],
["Meerkat App Directory", MEERKAT_APP_DIR],
["Meerkat NPM Package", MEERKAT_NPM_PACKAGE],
["Meerkat Templates Directory", MEERKAT_TEMPLATES_DIR],
["Meerkat Run Subprocess", MEERKAT_RUN_SUBPROCESS],
["Meerkat Run Script Path", MEERKAT_RUN_SCRIPT_PATH],
["Meerkat Run Process", MEERKAT_RUN_PROCESS],
["Meerkat Run Reload Count", MEERKAT_RUN_RELOAD_COUNT],
["", ""],
["Path Helper", ""],
["rundir", PathHelper().rundir],
["appdir", PathHelper().appdir],
["is_user_app", PathHelper().is_user_app],
["scriptpath", PathHelper().scriptpath],
["scriptpath_abs", PathHelper().scriptpath_abs],
["scriptdir_abs", PathHelper().scriptdir_abs],
["scriptname", PathHelper().scriptname],
]
)
)
|
meerkat-main
|
meerkat/constants.py
|
"""Meerkat."""
# flake8: noqa
from json import JSONEncoder
def _default(self, obj):
# https://stackoverflow.com/a/18561055
# Monkey patch json module at import time so
# JSONEncoder.default() checks for a "to_json()"
# method and uses it to encode objects if it exists
    # Note: this may have been added for a library other than FastAPI
if isinstance(obj, gui.Store):
return getattr(obj, "to_json", _default.default)()
return getattr(obj.__class__, "to_json", _default.default)(obj)
_default.default = JSONEncoder().default
JSONEncoder.default = _default
from meerkat.logging.utils import initialize_logging
initialize_logging()
import meerkat.interactive as gui
import meerkat.interactive.formatter as format
import meerkat.state as GlobalState
from meerkat.cells.abstract import AbstractCell
from meerkat.columns.abstract import Column, column
from meerkat.columns.deferred.audio import AudioColumn
from meerkat.columns.deferred.base import DeferredCell, DeferredColumn
from meerkat.columns.deferred.file import FileCell, FileColumn, FileLoader
from meerkat.columns.deferred.image import ImageColumn, image
from meerkat.columns.object.base import ObjectColumn
from meerkat.columns.scalar import ScalarColumn
from meerkat.columns.scalar.arrow import ArrowScalarColumn
from meerkat.columns.scalar.pandas import PandasScalarColumn
from meerkat.columns.tensor import TensorColumn
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.columns.tensor.torch import TorchTensorColumn
from meerkat.dataframe import DataFrame
from meerkat.datasets import get
from meerkat.ops.aggregate.aggregate import aggregate
from meerkat.ops.complete import complete
from meerkat.ops.concat import concat
from meerkat.ops.cond import (
_abs,
_all,
_any,
_bool,
_complex,
_dict,
_float,
_hex,
_int,
_len,
_list,
_max,
_min,
_oct,
_range,
_set,
_slice,
_str,
_sum,
_tuple,
cand,
cnot,
cor,
)
from meerkat.ops.embed import embed
from meerkat.ops.map import defer, map
from meerkat.ops.merge import merge
from meerkat.ops.sample import sample
from meerkat.ops.search import search
from meerkat.ops.shuffle import shuffle
from meerkat.ops.sliceby.clusterby import clusterby
from meerkat.ops.sliceby.explainby import explainby
from meerkat.ops.sliceby.groupby import groupby
from meerkat.ops.sort import sort
from meerkat.provenance import provenance
from meerkat.row import Row
from meerkat.tools.utils import classproperty
from .config import config
# alias for DataFrame for backwards compatibility
DataPanel = DataFrame
# aliases for columns
scalar = ScalarColumn
tensor = TensorColumn
deferred = DeferredColumn
objects = ObjectColumn
files = FileColumn
audio = AudioColumn
# aliases for io
from_csv = DataFrame.from_csv
from_feather = DataFrame.from_feather
from_json = DataFrame.from_json
from_parquet = DataFrame.from_parquet
from_pandas = DataFrame.from_pandas
from_arrow = DataFrame.from_arrow
from_huggingface = DataFrame.from_huggingface
read = DataFrame.read
# aliases for meerkat magic method invokers.
all = _all
any = _any
len = _len
int = _int
float = _float
complex = _complex
hex = _hex
oct = _oct
bool = _bool
str = _str
list = _list
tuple = _tuple
sum = _sum
dict = _dict
set = _set
range = _range
abs = _abs
min = _min
max = _max
slice = _slice
# These statements needs to be after the imports above.
# Do not move them.
import meerkat.interactive.svelte
from meerkat.interactive import Store, endpoint, mark, reactive, unmarked
from meerkat.interactive.formatter.base import (
BaseFormatter,
FormatterGroup,
FormatterPlaceholder,
)
from meerkat.interactive.graph.magic import magic
__all__ = [
"DataFrame",
"Row",
"reactive",
"unmarked",
"Store",
"mark",
"endpoint",
"magic",
# <<<< Columns >>>>
"column",
"Column",
"ObjectColumn",
"ScalarColumn",
"PandasScalarColumn",
"ArrowScalarColumn",
"TensorColumn",
"NumPyTensorColumn",
"TorchTensorColumn",
"DeferredColumn",
"FileColumn",
"ImageColumn",
"AudioColumn",
"AbstractCell",
"DeferredCell",
"FileCell",
# <<<< Operations >>>>
"map",
"defer",
"concat",
"complete",
"merge",
"embed",
"sort",
"sample",
"shuffle",
"groupby",
"clusterby",
"explainby",
"aggregate",
"cand",
"cor",
"cnot",
"all",
"any",
"bool",
"len",
"int",
"float",
"complex",
"hex",
"oct",
"str",
"list",
"tuple",
"slice",
"sum",
"dict",
"set",
"range",
"abs",
# <<<< I/O >>>>
"from_csv",
"from_json",
"from_parquet",
"from_feather",
"from_pandas",
"from_arrow",
"from_huggingface",
"read",
# <<<< Formatters >>>>
"BaseFormatter",
"FormatterGroup",
"FormatterPlaceholder",
# <<<< Misc >>>>
"provenance",
"config",
"gui",
"format",
"FileLoader",
"get",
"GlobalState",
# <<<< Aliases >>>>
"DataPanel",
"scalar",
"tensor",
"deferred",
"objects",
"files",
"image",
"audio",
# <<<< Utilities >>>>
"classproperty",
]
|
meerkat-main
|
meerkat/__init__.py
|
from __future__ import annotations
import base64
from io import BytesIO
from typing import TYPE_CHECKING, Any
from PIL import Image
import meerkat as mk
if TYPE_CHECKING:
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.deferred.file import FileCell
def auto_formatter(cell: Any):
if isinstance(cell, Image.Image):
return image_formatter(cell)
else:
return repr(cell)
def lambda_cell_formatter(cell: DeferredCell):
return auto_formatter(cell.get())
def image_formatter(cell: Image.Image):
if not mk.config.display.show_images:
return repr(cell)
max_image_width = mk.config.display.max_image_width
max_image_height = mk.config.display.max_image_height
cell.thumbnail((max_image_width, max_image_height))
with BytesIO() as buffer:
cell.save(buffer, "jpeg")
im_base_64 = base64.b64encode(buffer.getvalue()).decode()
return f'<img src="data:image/jpeg;base64,{im_base_64}">'
def image_file_formatter(cell: FileCell):
if not mk.config.display.show_images:
return repr(cell)
return lambda_cell_formatter(cell)
def audio_file_formatter(cell: FileCell) -> str:
if not mk.config.display.show_audio:
return repr(cell)
# TODO (Sabri): Implement based on audio_formatter so we can include transform
#
from IPython.display import Audio
return Audio(filename=cell.absolute_path)._repr_html_()
|
meerkat-main
|
meerkat/display.py
|
class MergeError(ValueError):
pass
class ConcatError(ValueError):
pass
class ConcatWarning(RuntimeWarning):
pass
class ConsolidationError(ValueError):
pass
class ImmutableError(ValueError):
pass
class ConversionError(ValueError):
pass
class ExperimentalWarning(FutureWarning):
pass
class TriggerError(Exception):
pass
|
meerkat-main
|
meerkat/errors.py
|
from __future__ import annotations
import warnings
import weakref
from copy import copy
from functools import wraps
from inspect import getcallargs
from typing import Any, Dict, List, Mapping, Sequence, Tuple, Union
import meerkat as mk
from meerkat.errors import ExperimentalWarning
_provenance_enabled = False
def set_provenance(enabled=True):
global _provenance_enabled
_provenance_enabled = enabled
class provenance:
def __init__(self, enabled: bool = True):
"""Context manager for enabling provenance capture in Meerkat.
Example:
```python
import meerkat as mk
with mk.provenance():
df = mk.DataFrame.from_batch({...})
```
"""
self.prev = None
self.enabled = enabled
def __enter__(self):
global _provenance_enabled
self.prev = _provenance_enabled
_provenance_enabled = self.enabled
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
global _provenance_enabled
_provenance_enabled = self.prev
def is_provenance_enabled():
return _provenance_enabled
class ProvenanceMixin:
def __init__(self, *args, **kwargs):
super(ProvenanceMixin, self).__init__(*args, **kwargs)
self._init_node()
def _init_node(self):
self._node = ProvenanceObjNode(self)
@property
def node(self):
return self._node
def get_provenance(
self, include_columns: bool = False, last_parent_only: bool = False
):
if self._node is None:
return None
nodes, edges = self._node.get_provenance(last_parent_only=last_parent_only)
if include_columns:
return nodes, edges
else:
# filter out edges and nodes
nodes = [
node
for node in nodes
if (
(
isinstance(node, ProvenanceOpNode)
and any(
[
issubclass(node.type, mk.DataFrame)
for node, key in node.children
]
)
)
or issubclass(node.type, mk.DataFrame)
)
]
edges = [
edge for edge in edges if all([(node in nodes) for node in edge[:2]])
]
return nodes, edges
class ProvenanceNode:
def add_child(self, node: ProvenanceNode, key: Tuple):
self._children.append((node, key))
with provenance(enabled=False):
self.cache_repr()
def add_parent(self, node: ProvenanceNode, key: Tuple):
self._parents.append((node, key))
with provenance(enabled=False):
self.cache_repr()
@property
def parents(self):
return self._parents
@property
def last_parent(self):
if len(self._parents) > 0:
return self._parents[-1]
else:
return None
@property
def children(self):
return self._children
def get_provenance(self, last_parent_only: bool = False):
edges = set()
nodes = set()
for parent, key in (
self.parents[-1:]
if (last_parent_only and self.__class__ == ProvenanceObjNode)
else self.parents
):
prev_nodes, prev_edges = parent.get_provenance(
last_parent_only=last_parent_only
)
edges |= prev_edges
edges.add((parent, self, key))
nodes |= prev_nodes
nodes.add(self)
return nodes, edges
def cache_repr(self):
self._cached_repr = self._repr() + "(deleted)"
def _repr(self):
raise NotImplementedError()
def __repr__(self):
if self.ref() is None:
return self._cached_repr
else:
return self._repr()
def __getstate__(self):
state = copy(self.__dict__)
state["ref"] = None
return state
def __setstate__(self, state: dict):
state["ref"] = lambda: None
self.__dict__.update(state)
class ProvenanceObjNode(ProvenanceNode):
def __init__(self, obj: ProvenanceMixin):
self.ref = weakref.ref(obj)
self.type = type(obj)
self._parents: List[Tuple(ProvenanceOpNode, tuple)] = []
self._children: List[Tuple(ProvenanceOpNode, tuple)] = []
def _repr(self):
return f"ObjNode({str(self.ref())})"
class ProvenanceOpNode(ProvenanceNode):
def __init__(
self, fn: callable, inputs: dict, outputs: object, captured_args: dict
):
self.ref = weakref.ref(fn)
self.type = type(fn)
self.name = fn.__qualname__
self.module = fn.__module__
self.captured_args = captured_args
self._parents: List[Tuple(ProvenanceObjNode, tuple)] = []
self._children: List[Tuple(ProvenanceObjNode, tuple)] = []
for key, inp in inputs.items():
self.add_parent(inp.node, key)
inp.node.add_child(self, key)
input_ids = {id(v) for v in inputs.values()}
for key, output in outputs.items():
# only include outputs that weren't passed in
if id(output) not in input_ids:
self.add_child(output.node, key)
output.node.add_parent(self, key)
def _repr(self):
return f"OpNode({self.module}.{self.name})"
def capture_provenance(capture_args: Sequence[str] = None):
capture_args = [] if capture_args is None else capture_args
def _provenance(fn: callable):
@wraps(fn)
def _wrapper(*args, **kwargs):
if not is_provenance_enabled():
return fn(*args, **kwargs)
args_dict = getcallargs(fn, *args, **kwargs)
if "kwargs" in args_dict:
args_dict.update(args_dict.pop("kwargs"))
captured_args = {arg: args_dict[arg] for arg in capture_args}
out = fn(*args, **kwargs)
# collect instances of ProvenanceMixin nested in the output
inputs = get_nested_objs(args_dict)
outputs = get_nested_objs(out)
ProvenanceOpNode(
fn=fn, inputs=inputs, outputs=outputs, captured_args=captured_args
)
return out
return _wrapper
return _provenance
def get_nested_objs(data):
"""Recursively get DataFrames and Columns from nested collections."""
objs = {}
_get_nested_objs(objs, (), data)
return objs
def _get_nested_objs(objs: Dict, key: Tuple[str], data: object):
if isinstance(data, Sequence) and not isinstance(data, str):
for idx, item in enumerate(data):
_get_nested_objs(objs, key=(*key, idx), data=item)
elif isinstance(data, Mapping):
for curr_key, item in data.items():
_get_nested_objs(objs, key=(*key, curr_key), data=item)
elif isinstance(data, mk.DataFrame):
objs[key] = data
for curr_key, item in data.items():
_get_nested_objs(objs, key=(*key, curr_key), data=item)
elif isinstance(data, mk.Column):
objs[key] = data
def visualize_provenance(
obj: Union[ProvenanceObjNode, ProvenanceOpNode],
show_columns: bool = False,
last_parent_only: bool = False,
):
warnings.warn( # pragma: no cover
ExperimentalWarning(
"The function `meerkat.provenance.visualize_provenance` is experimental and"
" has limited test coverage. Proceed with caution."
)
)
try: # pragma: no cover
import cyjupyter
except ImportError: # pragma: no cover
raise ImportError(
"`visualize_provenance` requires the `cyjupyter` dependency."
"See https://github.com/cytoscape/cytoscape-jupyter-widget"
)
nodes, edges = obj.get_provenance( # pragma: no cover
include_columns=show_columns, last_parent_only=last_parent_only
)
cy_nodes = [ # pragma: no cover
{
"data": {
"id": id(node),
"name": str(node),
"type": "obj" if isinstance(node, ProvenanceObjNode) else "op",
}
}
for node in nodes
]
cy_edges = [ # pragma: no cover
{
"data": {
"source": id(edge[0]),
"target": id(edge[1]),
"key": edge[2],
}
}
for edge in edges
]
cy_data = {"elements": {"nodes": cy_nodes, "edges": cy_edges}} # pragma: no cover
style = [ # pragma: no cover
{
"selector": "node",
"css": {
"content": "data(name)",
"background-color": "#fc8d62",
"border-color": "#252525",
"border-opacity": 1.0,
"border-width": 3,
},
},
{
"selector": "node[type = 'op']",
"css": {
"shape": "barrel",
"background-color": "#8da0cb",
},
},
{
# need to double index to access metadata (degree etc.)
"selector": "node[[degree = 0]]",
"css": {
"visibility": "hidden",
},
},
{
"selector": "edge",
"css": {
"content": "data(key)",
"line-color": "#252525",
"mid-target-arrow-color": "#8da0cb",
"mid-target-arrow-shape": "triangle",
"arrow-scale": 2.5,
"text-margin-x": 10,
"text-margin-y": 10,
},
},
]
return cyjupyter.Cytoscape( # pragma: no cover
data=cy_data, visual_style=style, layout_name="breadthfirst"
)
|
meerkat-main
|
meerkat/provenance.py
|
from meerkat.columns.deferred.base import DeferredCell
class Row(dict):
def __call__(self):
# FIXME(Sabri): Need to actually implement this based on the DAG of the deferred
        # cells for efficiency.
return Row(
{
name: cell() if isinstance(cell, DeferredCell) else cell
for name, cell in self.items()
}
)
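# Illustrative sketch (not part of the original module): a Row behaves like a
# dict, and calling it materializes any DeferredCell values while passing
# other values through unchanged. The field names below are hypothetical.
if __name__ == "__main__":
    row = Row({"name": "sample-0", "score": 0.5})
    print(row())  # {'name': 'sample-0', 'score': 0.5} -- no deferred cells here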
|
meerkat-main
|
meerkat/row.py
|
import logging
import os
import subprocess
import time
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional
from fastapi import FastAPI, HTTPException
from meerkat.interactive.server import Server
from meerkat.tools.utils import WeakMapping
if TYPE_CHECKING:
from meerkat.interactive.modification import Modification
from meerkat.mixins.identifiable import IdentifiableMixin
logger = logging.getLogger(__name__)
@dataclass
class Secrets:
api_keys: Dict[str, str] = field(default_factory=dict)
def add(self, api: str, api_key: str):
self.api_keys[api] = api_key
def get(self, api: str):
try:
return self.api_keys[api]
except KeyError:
raise HTTPException(
status_code=404,
detail=f"No API key found for {api}.\
Add one with `secrets.add(api, api_key)`.",
)
@dataclass
class LanguageModel:
manifest: Any = None
def set(self, client: str = "ai21", engine: str = "j1-jumbo"):
from manifest import Manifest
self.manifest = Manifest(
client_name=client,
client_connection=state.secrets.get(client),
engine=engine,
cache_name="sqlite",
cache_connection="./logs",
)
def get(self):
return self.manifest
@dataclass
class APIInfo:
api: Optional[FastAPI]
port: Optional[int]
server: Optional[Server] = None
name: str = "localhost"
shared: bool = False
process: Optional[subprocess.Popen] = None
_url: Optional[str] = None
@property
def url(self):
if self._url:
return self._url
if self.shared:
return f"http://{self.name}"
return f"http://{self.name}:{self.port}"
@property
def docs_url(self):
return f"{self.url}/docs"
@property
def docs(self):
from IPython.display import IFrame
return IFrame(self.docs_url, width=800, height=600)
@dataclass
class FrontendInfo:
package_manager: Optional[str]
port: Optional[int]
name: str = "localhost"
shared: bool = False
process: Optional[subprocess.Popen] = None
_url: Optional[str] = None
@property
def url(self):
if self._url:
return self._url
if self.shared:
return f"http://{self.name}"
return f"http://{self.name}:{self.port}"
@dataclass
class Identifiables:
"""We maintain a separate group for each type of identifiable object.
Objects in the group are identified by a unique id.
"""
columns: WeakMapping = field(default_factory=WeakMapping)
dataframes: WeakMapping = field(default_factory=WeakMapping)
pages: Mapping = field(default_factory=dict)
slicebys: WeakMapping = field(default_factory=WeakMapping)
aggregations: WeakMapping = field(default_factory=WeakMapping)
box_operations: WeakMapping = field(default_factory=WeakMapping)
components: WeakMapping = field(default_factory=WeakMapping)
refs: WeakMapping = field(default_factory=WeakMapping)
stores: WeakMapping = field(default_factory=WeakMapping)
endpoints: WeakMapping = field(default_factory=WeakMapping)
routers: WeakMapping = field(default_factory=WeakMapping)
nodes: WeakMapping = field(default_factory=WeakMapping)
states: WeakMapping = field(default_factory=WeakMapping)
def add(self, obj: "IdentifiableMixin"):
group = getattr(self, obj.identifiable_group)
group[obj.id] = obj
def get(self, id: str, group: str):
group, group_name = getattr(self, group), group
try:
value = group[id]
except KeyError:
raise HTTPException(
status_code=404,
detail=f"No object in group '{group_name}' with id '{id}'",
)
return value
@dataclass
class ModificationQueue:
"""A queue of modifications to be applied to a dataframe."""
queue: List["Modification"] = field(default_factory=list)
# Boolean attribute that controls whether the queue is accepting new
# modifications
# When _ready is False, `add` will no-op
_ready: bool = False
def add(self, modification: "Modification"):
if self._ready:
logger.debug(f"Adding modification {modification} to queue.")
self.queue.append(modification)
return
# Do nothing if not ready
logger.debug(f"Modification queue not ready. Ignoring {modification}.")
def clear(self) -> List["Modification"]:
"""Clear the modification queue, and return the old queue."""
logger.debug("Clearing modification queue.")
current_queue = self.queue
self.queue = []
return current_queue
def ready(self):
"""Ready the queue for accepting new modifications."""
count = 0
while self._ready:
# Modification queue is already in use
# Wait for it to be unready
logger.debug("Modification queue is already in use. Waiting...")
time.sleep(0.1)
count += 1
            if count == 1e3:  # warn after roughly 100 seconds of waiting
                logger.warning(
                    "Modification queue is taking a long time to unready. "
                    "Check for deadlocks."
                )
self._ready = True
logger.debug("Modification queue is now ready.")
def unready(self):
"""Unready the queue for accepting new modifications."""
self._ready = False
logger.debug("Modification queue is now unready.")
@dataclass
class ProgressQueue:
"""A queue of progress messages to be displayed to the user."""
queue: list = field(default_factory=list)
def add(self, message: str):
self.queue.append(message)
def clear(self) -> list:
"""Clear the progress queue, and return the old queue."""
current_queue = self.queue
self.queue = []
return current_queue
@dataclass
class GlobalState:
api_info: Optional[APIInfo] = None
frontend_info: Optional[FrontendInfo] = None
identifiables: Identifiables = field(default_factory=Identifiables)
secrets: Secrets = field(default_factory=Secrets)
llm: LanguageModel = field(default_factory=LanguageModel)
modification_queue: ModificationQueue = field(default_factory=ModificationQueue)
progress_queue: ProgressQueue = field(default_factory=ProgressQueue)
global state
state = GlobalState()
def add_secret(api: str, api_key: str):
"""Add an API key to the global state."""
state.secrets.add(api, api_key)
def run_on_startup():
"""Run on startup."""
frontend_url = os.environ.get("MEERKAT_FRONTEND_URL", None)
if frontend_url:
state.frontend_info = FrontendInfo(None, None, _url=frontend_url)
api_url = os.environ.get("MEERKAT_API_URL", None)
if api_url:
state.api_info = APIInfo(None, None, _url=api_url)
run_on_startup()
|
meerkat-main
|
meerkat/state.py
|
from __future__ import annotations
import abc
class AbstractCell(abc.ABC):
def __init__(self, *args, **kwargs):
super(AbstractCell, self).__init__(*args, **kwargs)
def get(self, *args, **kwargs) -> object:
"""Get me the thing that this cell exists for."""
raise NotImplementedError("Must implement `get`.")
@property
def metadata(self) -> dict:
"""Get the metadata associated with this cell."""
return {}
def __getitem__(self, index):
return self.get()[index]
def __str__(self):
return f"{self.__class__.__name__}"
def __repr__(self):
return f"{self.__class__.__name__}"
def __eq__(self, other):
raise NotImplementedError
|
meerkat-main
|
meerkat/cells/abstract.py
|
meerkat-main
|
meerkat/cells/__init__.py
|
|
from __future__ import annotations
from pathlib import Path
from typing import Callable, Dict, Sequence, Union
from meerkat.cells.abstract import AbstractCell
from meerkat.mixins.cloneable import StateClass
from meerkat.mixins.file import PathLikeType, PathsMixin
from meerkat.tools.lazy_loader import LazyLoader
pydicom = LazyLoader("pydicom")
dosma = LazyLoader("dosma")
dosma_core_io_format_io = LazyLoader("dosma.core.io.format_io")
try:
# Mapping from pydicom types to python types
_PYDICOM_TO_PYTHON = {
pydicom.valuerep.DSdecimal: float,
pydicom.valuerep.DSfloat: float,
pydicom.valuerep.IS: int,
pydicom.valuerep.PersonName: str,
pydicom.multival.MultiValue: list,
pydicom.sequence.Sequence: list,
}
except ImportError:
_PYDICOM_TO_PYTHON = {}
class MedicalVolumeCell(PathsMixin, AbstractCell):
"""Interface for loading medical volume data.
Examples:
# Specify xray dicoms with default orientation ``("SI", "AP")``:
>>> cell = MedicalVolumeCell("/path/to/xray.dcm",
            loader=DicomReader(group_by=None, default_ornt=("SI", "AP")))
# Load multi-echo MRI volumes
>>> cell = MedicalVolumeCell("/path/to/mri/scan/dir",
loader=DicomReader(group_by="EchoNumbers"))
"""
def __init__(
self,
paths: Union[PathLikeType, Sequence[PathLikeType]],
loader: Callable = None,
transform: Callable = None,
cache_metadata: bool = False,
*args,
**kwargs,
):
super(MedicalVolumeCell, self).__init__(paths=paths, *args, **kwargs)
self._metadata = None
self.transform: Callable = transform
self.cache_metadata = cache_metadata
self.loader = self.default_loader(self.paths) if loader is None else loader
def __getitem__(self, index):
image = self.get()
return image[index]
def __str__(self):
return f"{self.__class__.__name__}({self.paths})"
def __repr__(self):
return f"{self.__class__.__name__}({self.paths})"
@classmethod
def _unroll_path(cls, paths: Sequence[Path]):
if len(paths) == 1:
return paths[0]
return paths
@classmethod
def default_loader(cls, paths: Sequence[Path], *args, **kwargs):
paths = cls._unroll_path(paths)
return dosma.get_reader(dosma.ImageDataFormat.get_image_data_format(paths))
def get(self, *args, cache_metadata: bool = None, **kwargs):
image = self.loader(self._unroll_path(self.paths))
if cache_metadata is None:
cache_metadata = self.cache_metadata
# DOSMA returns a list of MedicalVolumes by default.
        # RG overrides this functionality - if only one MedicalVolume
# is returned, unpack that volume from the list.
if isinstance(image, (list, tuple)) and len(image) == 1:
image = image[0]
if self._metadata is None and cache_metadata:
_img = image[0] if isinstance(image, (list, tuple)) else image
headers = _img.headers(flatten=True)
self._metadata = self._prune_metadata(headers[0]) if headers else None
if self.transform is not None:
image = self.transform(image)
return image
def _prune_metadata(self, metadata):
"""Prunes the metadata to avoid keeping large fields.
We delete the PixelData field, because that data is a duplicate
of the image.
"""
del metadata["PixelData"]
return metadata
def get_metadata(
self,
ignore_bytes: bool = False,
readable: bool = False,
as_raw_type: bool = False,
force_load: bool = False,
) -> Dict:
if self._metadata is None:
if force_load:
_ = self.get(cache_metadata=True)
metadata = self._metadata
if not self.cache_metadata:
self.clear_metadata()
else:
return None
else:
metadata = self._metadata
# Raw data elements need to be decoded.
metadata = {
k: metadata[k] if hasattr(v, "is_raw") and v.is_raw else v
for k, v in metadata.items()
}
if ignore_bytes:
metadata = {
k: v for k, v in metadata.items() if not isinstance(v.value, bytes)
}
if readable:
metadata = {v.name: v for v in metadata.values()}
if as_raw_type:
metadata = {
k: (
_PYDICOM_TO_PYTHON[type(v.value)](v.value)
if type(v.value) in _PYDICOM_TO_PYTHON
else v.value
)
for k, v in metadata.items()
}
return metadata
def clear_metadata(self):
self._metadata = None
def get_state(self):
# Check if the loader is a `DataReader` from `dosma`
is_dosma_reader = isinstance(self.loader, dosma_core_io_format_io.DataReader)
loader = StateClass(
klass=type(self.loader) if is_dosma_reader else None,
state=self.loader.state_dict() if is_dosma_reader else self.loader,
)
return {
"paths": self.paths,
"loader": loader,
"transform": self.transform,
}
@classmethod
def from_state(cls, state, *args, **kwargs):
# Unpack the state
loader_state = state["loader"]
if loader_state.klass is None:
loader = loader_state.state
else:
loader = loader_state.klass()
loader.load_state_dict(loader_state.state)
return cls(
state["paths"],
loader=loader,
transform=state["transform"],
)
|
meerkat-main
|
meerkat/cells/volume.py
|
from meerkat import env
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.tools.utils import requires
torch = LazyLoader("torch")
torchaudio = LazyLoader("torchaudio")
class Audio:
def __init__(
self,
data,
sampling_rate: int,
bits: int = None,
) -> None:
if not env.is_package_installed("torch"):
raise ValueError(
f"{type(self)} requires torch. Follow these instructions "
"to install torch: https://pytorch.org/get-started/locally/."
)
self.data = torch.as_tensor(data)
self.sampling_rate = sampling_rate
self.bits = bits
def duration(self) -> float:
"""Return the duration of the audio in seconds."""
return len(self.data) / self.sampling_rate
@requires("torchaudio")
def resample(self, sampling_rate: int) -> "Audio":
"""Resample the audio with a new sampling rate.
Args:
sampling_rate: The new sampling rate.
Returns:
The resampled audio.
"""
if not env.is_package_installed("torchaudio"):
raise ValueError(
"resample requires torchaudio. Install with `pip install torchaudio`."
)
return Audio(
torchaudio.functional.resample(
self.data, self.sampling_rate, sampling_rate
),
sampling_rate,
)
def normalize(
self, lower: float = 0.0, upper: float = 1.0, eps: float = 1e-6
) -> "Audio":
"""Normalize the audio to a given range.
Args:
lower: The lower bound of the range.
upper: The upper bound of the range.
eps: The epsilon to used to avoid division by zero.
Returns:
The normalized audio.
"""
_min = torch.amin(self.data)
_max = torch.amax(self.data)
data = lower + (upper - lower) * (self.data - _min) / (_max - _min + eps)
return Audio(data=data, sampling_rate=self.sampling_rate)
def quantize(self, bits: int, epsilon: float = 1e-2) -> "Audio":
"""Linearly quantize a signal to a given number of bits.
The signal must be in the range [0, 1].
Args:
bits: The number of bits to quantize to.
epsilon: The epsilon to use for clipping the signal.
Returns:
The quantized audio.
"""
if self.bits is not None:
raise ValueError(
"Audio is already quantized. Use `.dequantize` to dequantize "
"the signal and then requantize."
)
if torch.any(self.data < 0) or torch.any(self.data > 1):
raise ValueError("Audio must be in the range [0, 1] to quantize.")
q_levels = 1 << bits
samples = (q_levels - epsilon) * self.data
samples += epsilon / 2
        return Audio(samples.long(), sampling_rate=self.sampling_rate, bits=bits)
def dequantize(self) -> "Audio":
"""Dequantize a signal.
Returns:
The dequantized audio.
"""
if self.bits is None:
raise ValueError("Audio is not quantized.")
q_levels = 1 << self.bits
return Audio(
self.data.float() / (q_levels / 2) - 1, sampling_rate=self.sampling_rate
)
def __repr__(self) -> str:
return f"Audio({self.duration()} seconds @ {self.sampling_rate}Hz)"
def __eq__(self, other: "Audio") -> bool:
return (
self.data.shape == other.data.shape
and self.sampling_rate == other.sampling_rate
and torch.allclose(self.data, other.data)
)
def __getitem__(self, key: int) -> "Audio":
return Audio(self.data[key], self.sampling_rate)
def __len__(self) -> int:
return len(self.data)
def _repr_html_(self) -> str:
import IPython.display as ipd
return ipd.Audio(self.data, rate=self.sampling_rate)._repr_html_()
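# Illustrative sketch (not part of the original module): normalizing and
# quantizing a synthetic 1-second sine wave. Requires torch, just like the
# Audio class itself; the sampling rate and frequency are arbitrary.
if __name__ == "__main__":
    import math
    sr = 16000
    wave = [math.sin(2 * math.pi * 440.0 * t / sr) for t in range(sr)]
    audio = Audio(wave, sampling_rate=sr)
    print(audio)  # Audio(1.0 seconds @ 16000Hz)
    normalized = audio.normalize()  # rescales values into [0, 1]
    quantized = normalized.quantize(bits=8)  # integer levels in [0, 255]
    print(int(quantized.data.min()), int(quantized.data.max()))  # 0 255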
|
meerkat-main
|
meerkat/cells/audio.py
|
from __future__ import annotations
from meerkat.cells.abstract import AbstractCell
from meerkat.tools.lazy_loader import LazyLoader
spacy = LazyLoader("spacy")
spacy_attrs = LazyLoader("spacy.attrs")
spacy_tokens = LazyLoader("spacy.tokens")
class SpacyCell(AbstractCell):
def __init__(
self,
doc: spacy_tokens.Doc,
*args,
**kwargs,
):
super(SpacyCell, self).__init__(*args, **kwargs)
# Put some data into the cell
self.doc = doc
def default_loader(self, *args, **kwargs):
return self
def get(self, *args, **kwargs):
return self.doc
def get_state(self):
attrs = [name for name in spacy_attrs.NAMES if name != "HEAD"]
arr = self.get().to_array(attrs)
return {
"arr": arr.flatten(),
"shape": list(arr.shape),
"words": [t.text for t in self.get()],
}
@classmethod
def from_state(cls, state, nlp: spacy.language.Language):
doc = spacy_tokens.Doc(nlp.vocab, words=state["words"])
attrs = [name for name in spacy_attrs.NAMES if name != "HEAD"]
return cls(doc.from_array(attrs, state["arr"].reshape(state["shape"])))
def __getitem__(self, index):
return self.get()[index]
def __getattr__(self, item):
try:
return getattr(self.get(), item)
except AttributeError:
raise AttributeError(f"Attribute {item} not found.")
def __repr__(self):
return f"{self.__class__.__name__}({self.get().__repr__()})"
class LazySpacyCell(AbstractCell):
def __init__(
self,
text: str,
nlp: spacy.language.Language,
*args,
**kwargs,
):
super(LazySpacyCell, self).__init__(*args, **kwargs)
# Put some data into the cell
self.text = text
self.nlp = nlp
def default_loader(self, *args, **kwargs):
return self
def get(self, *args, **kwargs):
return self.nlp(self.text)
def get_state(self):
return {
"text": self.text,
}
@classmethod
def from_state(cls, state, nlp: spacy.language.Language):
return cls(text=state["text"], nlp=nlp)
def __getitem__(self, index):
return self.get()[index]
def __getattr__(self, item):
try:
return getattr(self.get(), item)
except AttributeError:
raise AttributeError(f"Attribute {item} not found.")
def __repr__(self):
snippet = f"{self.text[:15]}..." if len(self.text) > 20 else self.text
return f"{self.__class__.__name__}({snippet})"
|
meerkat-main
|
meerkat/cells/spacy.py
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy loader class."""
from __future__ import absolute_import, division, print_function
import importlib
import logging
import types
logger = logging.getLogger(__name__)
class LazyLoader(types.ModuleType):
"""Lazily import a module, mainly to avoid pulling in large dependencies.
    `contrib` and `ffmpeg` are examples of modules that are large and
    not always needed, and this allows them to be loaded only when they
    are actually used.
"""
# The lint error here is incorrect.
def __init__(
self,
local_name,
parent_module_globals=None,
warning=None,
error=None,
): # pylint: disable=super-on-old-class
if parent_module_globals is None:
parent_module_globals = globals()
self._local_name = local_name
self._parent_module_globals = parent_module_globals
self._warning = warning
self._error = error
super(LazyLoader, self).__init__(local_name)
def _load(self):
"""Load the module and insert it into the parent's globals."""
# Import the target module and insert it into the parent's namespace
try:
module = importlib.import_module(self.__name__)
except ImportError as e:
raise ImportError(self._error) if self._error else e
self._parent_module_globals[self._local_name] = module
# Emit a warning if one was specified
if self._warning:
logger.warning(self._warning)
# Make sure to only warn once.
self._warning = None
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
def __getstate__(self):
"""Enable pickling, by removing the parent module globals from the
dict."""
return {
"local_name": self._local_name,
"warning": self._warning,
"error": self._error,
}
def __setstate__(self, state):
"""Enable pickling."""
self.__init__(**state)
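# Illustrative sketch (not part of the original module): deferring an import
# until first attribute access. "json" is used here only because it is always
# available; in practice this is done for large optional dependencies.
if __name__ == "__main__":
    lazy_json = LazyLoader("json")
    # No import has happened yet; this first attribute access triggers it and
    # inserts the real module into the parent namespace.
    print(lazy_json.dumps({"lazy": True}))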
|
meerkat-main
|
meerkat/tools/lazy_loader.py
|
import functools
from typing import Any, List, Optional, Sequence
from fvcore.common.registry import Registry as _Registry
from tabulate import tabulate
from meerkat.dataframe import DataFrame
class Registry(_Registry):
"""Extension of fvcore's registry that supports aliases."""
_ALIAS_KEYWORDS = ("_aliases", "_ALIASES")
def __init__(self, name: str):
super().__init__(name=name)
self._metadata_map = {}
def get(self, name: str, **kwargs) -> Any:
ret = self._obj_map.get(name)
if ret is None:
raise KeyError(
"No object named '{}' found in '{}' registry!".format(name, self._name)
)
return ret(**kwargs)()
def get_obj(self, name: str) -> type:
return self._obj_map[name]
def _get_aliases(self, obj_func_or_class):
for kw in self._ALIAS_KEYWORDS:
if hasattr(obj_func_or_class, kw):
return getattr(obj_func_or_class, kw)
return []
def register(
self, obj: object = None, aliases: Sequence[str] = None
) -> Optional[object]:
if obj is None:
# used as a decorator
def deco(func_or_class: object, aliases=None) -> object:
name = func_or_class.__name__ # pyre-ignore
self._do_register(name, func_or_class)
if aliases is None:
aliases = self._get_aliases(func_or_class)
if not isinstance(aliases, (list, tuple, set)):
aliases = [aliases]
for alias in aliases:
self._do_register(alias, func_or_class)
return func_or_class
kwargs = {"aliases": aliases}
if any(v is not None for v in kwargs.values()):
return functools.partial(deco, **kwargs)
else:
return deco
name = obj.__name__ # pyre-ignore
self._do_register(name, obj)
if aliases is None:
aliases = self._get_aliases(obj)
for alias in aliases:
self._do_register(alias, obj)
def _do_register(self, name: str, obj: Any, **kwargs) -> None:
self._metadata_map[name] = {"name": name, "description": obj.__doc__, **kwargs}
return super()._do_register(name, obj)
@property
def names(self) -> List[str]:
return list(self._obj_map.keys())
@property
def catalog(self) -> DataFrame:
rows = []
for name, builder in self:
rows.append(builder.info.__dict__)
return DataFrame(rows)
def __repr__(self) -> str:
table = tabulate(self._metadata_map.values(), tablefmt="fancy_grid")
return "Registry of {}:\n".format(self._name) + table
|
meerkat-main
|
meerkat/tools/registry.py
|
"""Adapted from https://github.com/facebookresearch/detectron2."""
import importlib
import os
import platform
import re
import subprocess
import sys
from collections import defaultdict
import numpy as np
import PIL
from tabulate import tabulate
from meerkat.tools.lazy_loader import LazyLoader
from meerkat.version import __version__
torch = LazyLoader("torch")
torchvision = LazyLoader("torchvision")
__all__ = ["collect_env_info"]
def collect_torch_env():
try:
import torch.__config__
return torch.__config__.show()
except ImportError:
# compatible with older versions of pytorch
from torch.utils.collect_env import get_pretty_env_info
return get_pretty_env_info()
def detect_compute_compatibility(CUDA_HOME, so_file):
try:
cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump")
if os.path.isfile(cuobjdump):
output = subprocess.check_output(
"'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True
)
output = output.decode("utf-8").strip().split("\n")
sm = []
for line in output:
line = re.findall(r"\.sm_[0-9]*\.", line)[0]
sm.append(line.strip("."))
sm = sorted(set(sm))
return ", ".join(sm)
else:
return so_file + "; cannot find cuobjdump"
except Exception:
# unhandled failure
return so_file
def collect_torchvision_env():
from torch.utils.cpp_extension import CUDA_HOME
has_cuda = torch.cuda.is_available()
data = []
try:
import torchvision
data.append(
(
"torchvision",
str(torchvision.__version__)
+ " @"
+ os.path.dirname(torchvision.__file__),
)
)
if has_cuda:
try:
torchvision_C = importlib.util.find_spec("torchvision._C").origin
msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
data.append(("torchvision arch flags", msg))
except ImportError:
data.append(("torchvision._C", "failed to find"))
except (AttributeError, ModuleNotFoundError):
data.append(("torchvision", "unknown"))
return data
def _get_version(module_name: str, raise_error: bool = False) -> str:
"""Get version of a module from subprocess."""
try:
return subprocess.run(
[module_name, "--version"], stdout=subprocess.PIPE
).stdout.decode("utf-8")
except Exception as e:
if raise_error:
raise e
return "unknown"
def collect_env_info():
has_cuda = torch.cuda.is_available()
data = []
data.append(("sys.platform", sys.platform))
data.append(("platform.platform", platform.platform()))
data.append(("node", _get_version("node")))
data.append(("npm", _get_version("npm")))
data.append(("Python", sys.version.replace("\n", "")))
data.append(("meerkat", __version__))
data.append(("numpy", np.__version__))
data.append(("PyTorch", torch.__version__ + " @" + os.path.dirname(torch.__file__)))
data.append(("PyTorch debug build", torch.version.debug))
data.append(("CUDA available", has_cuda))
if has_cuda:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, devids in devices.items():
data.append(("GPU " + ",".join(devids), name))
from torch.utils.cpp_extension import CUDA_HOME
data.append(("CUDA_HOME", str(CUDA_HOME)))
if CUDA_HOME is not None and os.path.isdir(CUDA_HOME):
try:
nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
nvcc = subprocess.check_output(
"'{}' -V | tail -n1".format(nvcc), shell=True
)
nvcc = nvcc.decode("utf-8").strip()
except subprocess.SubprocessError:
nvcc = "Not Available"
data.append(("NVCC", nvcc))
cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
if cuda_arch_list:
data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
data.append(("Pillow", PIL.__version__))
# torchvision
data.extend(collect_torchvision_env())
try:
import cv2
data.append(("cv2", cv2.__version__))
except ImportError:
pass
# Slurm info
data.append(("SLURM_JOB_ID", os.environ.get("SLURM_JOB_ID", "slurm not detected")))
env_str = tabulate(data) + "\n"
env_str += collect_torch_env()
return env_str
if __name__ == "__main__":
print(collect_env_info())
|
meerkat-main
|
meerkat/tools/collect_env.py
|
meerkat-main
|
meerkat/tools/__init__.py
|
|
import abc
from string import Template
from textwrap import dedent, indent
from typing import Any, Callable, Dict
FuncType = Callable[..., Any]
GLOBAL_DOCS = {}
def doc(source: Dict[str, str] = None, **kwargs) -> Callable[[FuncType], FuncType]:
"""
    A decorator that performs template substitution on the decorated
    callable's docstring.

    Values in ``source`` that are plain strings are substituted directly into
    the decorated callable's docstring; values that are ``DocComponent``
    instances are first re-indented to match the target docstring.

    Parameters
    ----------
    source : Dict[str, str], optional
        Mapping from template keys to replacement text. Defaults to
        ``GLOBAL_DOCS``.
    **kwargs
        Key/value pairs substituted into the docstring template.
References:
This decorator is adapted from `pandas.util._decorators.doc`
"""
if source is None:
source = GLOBAL_DOCS
def decorator(decorated: FuncType) -> FuncType:
docstring = decorated.__doc__
if docstring is None:
docstring = ""
fixed_source = {
key: (
value.fix_indentation(docstring)
if isinstance(value, DocComponent)
else value
)
for key, value in source.items()
}
docstring = Template(docstring).safe_substitute(**fixed_source)
docstring = Template(docstring).safe_substitute(**kwargs)
decorated.__doc__ = docstring
return decorated
return decorator
class DocComponent(abc.ABC):
def __init__(self, text: str):
self.text = text
@abc.abstractmethod
def fix_indentation(self, docstring: str) -> str:
raise NotImplementedError()
class DescriptionSection(DocComponent):
def fix_indentation(self, docstring: str) -> str:
# get common leading whitespace from docstring ignoring first line
lines = docstring.splitlines()
leading_whitespace = min(
len(line) - len(line.lstrip()) for line in lines[1:] if line.strip()
)
prefix = leading_whitespace * " "
text = indent(dedent(self.text), prefix)
return text
class Arg(DocComponent):
def fix_indentation(self, docstring: str) -> str:
# get common leading whitespace from docstring ignoring first line
lines = docstring.splitlines()
leading_whitespace = min(
len(line) - len(line.lstrip()) for line in lines[1:] if line.strip()
)
prefix = (leading_whitespace + 4) * " "
text = indent(dedent(self.text), prefix)
return text
class ArgDescription(DocComponent):
def fix_indentation(self, docstring: str) -> str:
# get common leading whitespace from docstring
lines = docstring.splitlines()
leading_whitespace = min(
len(line) - len(line.lstrip()) for line in lines if line.strip()
)
prefix = (leading_whitespace + 4) * " "
text = indent(dedent(self.text), prefix)
# remove prefix from first line
text = text[len(prefix) :]
return text
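# Illustrative sketch (not part of the original module): substituting a shared
# parameter description into a docstring template. The key "x_param" and the
# function below are hypothetical.
if __name__ == "__main__":
    shared = {"x_param": "x: the input value to transform."}
    @doc(source=shared)
    def double(x):
        """Double a value.

        Args:
            ${x_param}
        """
        return 2 * x
    print(double.__doc__)  # "${x_param}" is replaced with the shared text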
|
meerkat-main
|
meerkat/tools/docs.py
|
"""
Source: https://github.com/dmfrey/FileLock
"""
import errno
import os
import time
class FileLockException(Exception):
pass
class FileLock(object):
"""A file locking mechanism that has context-manager support so you can use
it in a with statement.
    This should be relatively cross-platform compatible, as it doesn't
    rely on msvcrt or fcntl for the locking.
"""
def __init__(self, file_name, timeout=10, delay=0.05):
"""Prepare the file locker.
Specify the file to lock and optionally the maximum timeout and
the delay between each attempt to lock.
"""
if timeout is not None and delay is None:
raise ValueError("If timeout is not None, then delay must not be None.")
self.is_locked = False
self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
self.file_name = file_name
self.timeout = timeout
self.delay = delay
def acquire(self):
"""Acquire the lock, if possible.
        If the lock is in use, it checks again every `delay` seconds. It
        does this until it either gets the lock or exceeds `timeout`
        seconds, in which case it raises an exception.
"""
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
self.is_locked = True
break
except OSError as e:
if e.errno != errno.EEXIST:
raise
if self.timeout is None:
raise FileLockException(
"Could not acquire lock on {}".format(self.file_name)
)
if (time.time() - start_time) >= self.timeout:
raise FileLockException(
"Timeout occured: could not acquire lock on {}".format(
self.lockfile
)
)
time.sleep(self.delay)
def release(self):
"""Get rid of the lock by deleting the lockfile.
When working in a `with` statement, this gets automatically
called at the end.
"""
if self.is_locked:
os.close(self.fd)
os.unlink(self.lockfile)
self.is_locked = False
def __enter__(self):
"""Activated when used in the with statement.
Should automatically acquire a lock to be used in the with
block.
"""
if not self.is_locked:
self.acquire()
return self
def __exit__(self, type, value, traceback):
"""Activated at the end of the with statement.
        It automatically releases the lock if it is still held.
"""
if self.is_locked:
self.release()
def __del__(self):
"""Make sure that the FileLock instance doesn't leave a lockfile lying
around."""
self.release()
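# Illustrative sketch (not part of the original module): guarding writes to a
# shared file with FileLock. "shared.txt" is a hypothetical file name; the
# lock file "shared.txt.lock" is created in the current working directory.
if __name__ == "__main__":
    with FileLock("shared.txt", timeout=5, delay=0.1) as lock:
        print("acquired:", lock.is_locked)
        with open("shared.txt", "a") as f:
            f.write("one safely appended line\n")
    print("released")  # the lock file has been removed at this point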
|
meerkat-main
|
meerkat/tools/filelock.py
|
import inspect
import sys
import types
import typing
import warnings
import weakref
from collections import defaultdict
from collections.abc import Mapping
from functools import reduce, wraps
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
import dill
import numpy as np
import pandas as pd
import yaml
from yaml.constructor import ConstructorError
from meerkat import env
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
def is_subclass(v, cls):
"""Check if `v` is a subclass of `cls`, with guard for TypeError."""
try:
_is_subclass = issubclass(v, cls)
except TypeError:
_is_subclass = False
return _is_subclass
def has_var_kwargs(fn: Callable) -> bool:
"""Check if a function has variable keyword arguments e.g. **kwargs.
Args:
fn: The function to check.
Returns:
True if the function has variable keyword arguments, False otherwise.
"""
sig = inspect.signature(fn)
params = sig.parameters.values()
return any([True for p in params if p.kind == p.VAR_KEYWORD])
def has_var_args(fn: Callable) -> bool:
"""Check if a function has variable positional arguments e.g. *args.
Args:
fn: The function to check.
Returns:
True if the function has variable positional arguments, False otherwise.
"""
sig = inspect.signature(fn)
params = sig.parameters.values()
return any([True for p in params if p.kind == p.VAR_POSITIONAL])
def get_type_hint_args(type_hint):
"""Get the arguments of a type hint."""
if sys.version_info >= (3, 8):
# Python > 3.8
return typing.get_args(type_hint)
else:
return type_hint.__args__
def get_type_hint_origin(type_hint):
"""Get the origin of a type hint."""
if sys.version_info >= (3, 8):
# Python > 3.8
return typing.get_origin(type_hint)
else:
return type_hint.__origin__
class classproperty(property):
"""Taken from https://stackoverflow.com/a/13624858.
The behavior of class properties using the @classmethod and
@property decorators has changed across Python versions. This class
(should) provide consistent behavior across Python versions. See
https://stackoverflow.com/a/1800999 for more information.
"""
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
def deprecated(replacement: Optional[str] = None):
"""This is a decorator which can be used to mark functions as deprecated.
It will result in a warning being emitted when the function is used.
"""
def _decorator(func):
@wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter("always", DeprecationWarning) # turn off filter
warnings.warn(
"Call to deprecated function {}.".format(func.__name__) + ""
if new_func is None
else " Use {} instead.".format(replacement),
category=DeprecationWarning,
stacklevel=2,
)
warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
return _decorator
def requires(*packages):
"""Use this decorator to identify which packages must be installed.
It will raise an error if the function is called and these packages
are not available.
"""
def _decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
for package in packages:
fn_str = f"{func.__qualname__}()"
if not env.package_available(package):
raise ImportError(
f"Missing package `{package}` which is required for {fn_str}."
)
return func(*args, **kwargs)
return wrapped
return _decorator
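# Illustrative sketch (not part of the original module): `requires` fails fast
# with a clear error when an optional dependency is missing. The package name
# below is deliberately nonexistent and used only for this example.
if __name__ == "__main__":
    @requires("some_missing_pkg")
    def needs_missing_pkg():
        return "never reached"
    try:
        needs_missing_pkg()
    except ImportError as exc:
        print(exc)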
class WeakMapping(Mapping):
def __init__(self):
self.refs: Dict[Any, weakref.ReferenceType] = {}
def __getitem__(self, key: str):
ref = self.refs[key]
obj = ref()
if obj is None:
raise KeyError(f"Object with key {key} no longer exists")
return obj
def __setitem__(self, key: str, value: Any):
self.refs[key] = weakref.ref(value)
def __delitem__(self, key: str):
del self.refs[key]
def __iter__(self):
return iter(self.refs)
def __len__(self):
return len(self.refs)
def nested_getattr(obj, attr, *args):
"""Get a nested property from an object.
# noqa: E501
Source: https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-subobjects-chained-properties
"""
return reduce(lambda o, a: getattr(o, a, *args), [obj] + attr.split("."))
def nested_apply(obj: object, fn: callable, base_types: Tuple[type] = ()):
if isinstance(obj, base_types):
return fn(obj)
elif isinstance(obj, list):
return [nested_apply(v, fn=fn, base_types=base_types) for v in obj]
elif isinstance(obj, tuple):
return tuple(nested_apply(v, fn=fn, base_types=base_types) for v in obj)
elif isinstance(obj, dict):
return {
k: nested_apply(v, fn=fn, base_types=base_types) for k, v in obj.items()
}
else:
return fn(obj)
BACKWARDS_COMPAT_REPLACEMENTS = [
("meerkat.ml", "meerkat.nn"),
("meerkat.columns.numpy_column", "meerkat.columns.tensor.numpy"),
("NumpyArrayColumn", "NumPyTensorColumn"),
("meerkat.columns.tensor_column", "meerkat.columns.tensor.torch"),
("meerkat.columns.pandas_column", "meerkat.columns.scalar.pandas"),
("meerkat.columns.arrow_column", "meerkat.columns.scalar.arrow"),
("meerkat.columns.image_column", "meerkat.columns.deferred.image"),
("meerkat.columns.file_column", "meerkat.columns.deferred.file"),
("meerkat.columns.list_column", "meerkat.columns.object.base"),
("meerkat.block.lambda_block", "meerkat.block.deferred_block"),
(
"meerkat.interactive.app.src.lib.component.filter",
"meerkat.interactive.app.src.lib.component.core.filter",
),
("ListColumn", "ObjectColumn"),
("LambdaBlock", "DeferredBlock"),
("NumpyBlock", "NumPyBlock"),
]
class MeerkatDumper(yaml.Dumper):
@staticmethod
def _pickled_object_representer(dumper, data):
return dumper.represent_mapping(
"!PickledObject", {"class": data.__class__, "pickle": dill.dumps(data)}
)
@staticmethod
def _function_representer(dumper, data):
if data.__name__ == "<lambda>":
return dumper.represent_mapping(
"!Lambda",
{"code": inspect.getsource(data), "pickle": dill.dumps(data)},
)
if "<locals>" in data.__qualname__:
return dumper.represent_mapping(
"!NestedFunction",
{"code": inspect.getsource(data), "pickle": dill.dumps(data)},
)
return dumper.represent_name(data)
MeerkatDumper.add_multi_representer(object, MeerkatDumper._pickled_object_representer)
MeerkatDumper.add_representer(types.FunctionType, MeerkatDumper._function_representer)
class MeerkatLoader(yaml.FullLoader):
"""PyYaml does not load unimported modules for safety reasons.
We want to allow importing only meerkat modules
"""
def find_python_module(self, name: str, mark, unsafe=False):
try:
return super().find_python_module(name=name, mark=mark, unsafe=unsafe)
except ConstructorError as e:
if name.startswith("meerkat."):
__import__(name)
else:
raise e
return super().find_python_module(name=name, mark=mark, unsafe=unsafe)
def find_python_name(self, name: str, mark, unsafe=False):
for old, new in BACKWARDS_COMPAT_REPLACEMENTS:
if old in name:
name = name.replace(old, new)
if "." in name:
module_name, _ = name.rsplit(".", 1)
else:
module_name = "builtins"
try:
return super().find_python_name(name=name, mark=mark, unsafe=unsafe)
except ConstructorError as e:
if name.startswith("meerkat."):
__import__(module_name)
else:
raise e
return super().find_python_name(name=name, mark=mark, unsafe=unsafe)
@staticmethod
def _pickled_object_constructor(loader, node):
data = loader.construct_mapping(node)
return dill.loads(data["pickle"])
@staticmethod
def _function_constructor(loader, node):
data = loader.construct_mapping(node)
return dill.loads(data["pickle"])
MeerkatLoader.add_constructor(
"!PickledObject", MeerkatLoader._pickled_object_constructor
)
MeerkatLoader.add_constructor("!Lambda", MeerkatLoader._function_constructor)
def dump_yaml(obj: Any, path: str, **kwargs):
with open(path, "w") as f:
yaml.dump(obj, f, Dumper=MeerkatDumper, **kwargs)
def load_yaml(path: str, **kwargs):
with open(path, "r") as f:
return yaml.load(f, Loader=MeerkatLoader, **kwargs)
class MeerkatUnpickler(dill.Unpickler):
def find_class(self, module, name):
try:
return super().find_class(module, name)
except Exception:
for old, new in BACKWARDS_COMPAT_REPLACEMENTS:
if old in module:
module = module.replace(old, new)
return super().find_class(module, name)
def meerkat_dill_load(path: str):
"""Load dill file with backwards compatibility for old column names."""
return MeerkatUnpickler(open(path, "rb")).load()
def convert_to_batch_column_fn(
function: Callable, with_indices: bool, materialize: bool = True, **kwargs
):
"""Batch a function that applies to an example."""
def _function(batch: Sequence, indices: Optional[List[int]], *args, **kwargs):
# Pull out the batch size
batch_size = len(batch)
# Iterate and apply the function
outputs = None
for i in range(batch_size):
# Apply the unbatched function
if with_indices:
output = function(
batch[i] if materialize else batch[i],
indices[i],
*args,
**kwargs,
)
else:
output = function(
batch[i] if materialize else batch[i],
*args,
**kwargs,
)
if i == 0:
# Create an empty dict or list for the outputs
outputs = defaultdict(list) if isinstance(output, dict) else []
# Append the output
if isinstance(output, dict):
for k in output.keys():
outputs[k].append(output[k])
else:
outputs.append(output)
if isinstance(outputs, dict):
return dict(outputs)
return outputs
if with_indices:
# Just return the function as is
return _function
else:
# Wrap in a lambda that passes None for the indices argument
return lambda batch, *args, **kwargs: _function(batch, None, *args, **kwargs)
def convert_to_batch_fn(
function: Callable, with_indices: bool, materialize: bool = True, **kwargs
):
"""Batch a function that applies to an example."""
def _function(
batch: Dict[str, List], indices: Optional[List[int]], *args, **kwargs
):
# Pull out the batch size
batch_size = len(batch[list(batch.keys())[0]])
# Iterate and apply the function
outputs = None
for i in range(batch_size):
# Apply the unbatched function
if with_indices:
output = function(
{k: v[i] if materialize else v[i] for k, v in batch.items()},
indices[i],
*args,
**kwargs,
)
else:
output = function(
{k: v[i] if materialize else v[i] for k, v in batch.items()},
*args,
**kwargs,
)
if i == 0:
# Create an empty dict or list for the outputs
outputs = defaultdict(list) if isinstance(output, dict) else []
# Append the output
if isinstance(output, dict):
for k in output.keys():
outputs[k].append(output[k])
else:
outputs.append(output)
if isinstance(outputs, dict):
return dict(outputs)
return outputs
if with_indices:
# Just return the function as is
return _function
else:
# Wrap in a lambda that passes None for the indices argument
return lambda batch, *args, **kwargs: _function(batch, None, *args, **kwargs)
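# Hypothetical usage sketch (not part of meerkat): `convert_to_batch_fn` lifts
# a per-example function over a dict-of-lists batch. `add_one` and the demo
# function name are illustrative only.
def _demo_convert_to_batch_fn():
    def add_one(example):
        return {"y": example["x"] + 1}

    batched = convert_to_batch_fn(add_one, with_indices=False)
    # The wrapper supplies `indices=None` internally, so only the batch is passed.
    assert batched({"x": [1, 2, 3]}) == {"y": [2, 3, 4]}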
def convert_to_python(obj: Any):
"""Utility for converting NumPy and torch dtypes to native python types.
Useful when sending objects to frontend.
"""
import torch
if torch.is_tensor(obj):
obj = obj.numpy()
if isinstance(obj, np.generic):
obj = obj.item()
return obj
def translate_index(index, length: int):
def _is_batch_index(index):
# np.ndarray indexed with a tuple of length 1 does not return an np.ndarray
# so we match this behavior
return not (
isinstance(index, int) or (isinstance(index, tuple) and len(index) == 1)
)
# `index` should return a single element
if not _is_batch_index(index):
return index
from ..columns.scalar.abstract import ScalarColumn
from ..columns.tensor.abstract import TensorColumn
if isinstance(index, pd.Series):
index = index.values
if torch.is_tensor(index):
index = index.numpy()
if isinstance(index, tuple) or isinstance(index, list):
index = np.array(index)
if isinstance(index, ScalarColumn):
index = index.to_numpy()
if isinstance(index, TensorColumn):
if len(index.shape) == 1:
index = index.to_numpy()
else:
raise TypeError(
"`TensorColumn` index must have 1 axis, not {}".format(len(index.shape))
)
# `index` should return a batch
if isinstance(index, slice):
# int or slice index => standard list slicing
indices = np.arange(*index.indices(length))
elif isinstance(index, np.ndarray):
if len(index.shape) != 1:
raise TypeError(
"`np.ndarray` index must have 1 axis, not {}".format(len(index.shape))
)
if index.dtype == bool:
indices = np.where(index)[0]
else:
return index
else:
raise TypeError("Object of type {} is not a valid index".format(type(index)))
return indices
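# Hypothetical usage sketch (not part of meerkat): `translate_index` normalizes
# the accepted index types into either a single position or an array of
# positions. The inputs below are illustrative only.
def _demo_translate_index():
    assert translate_index(3, length=10) == 3  # single-element index passes through
    assert list(translate_index(slice(0, 4), length=10)) == [0, 1, 2, 3]
    mask = np.array([True, False, True, False])
    assert list(translate_index(mask, length=4)) == [0, 2]  # boolean mask -> positions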
def choose_device(device: str = "auto"):
"""Choose the device to use for a Meerkat operation."""
from meerkat.config import config
if not config.system.use_gpu:
return "cpu"
if device == "auto":
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
return device
|
meerkat-main
|
meerkat/tools/utils.py
|
class Singleton(type):
"""A metaclass that ensures only one instance of a class is created.
Usage:
>>> class Example(metaclass=Singleton):
... def __init__(self, x):
... self.x = x
>>> a = Example(1)
>>> b = Example(2)
>>> print(a, id(a))
<__main__.Example object at 0x7f8b8c0b7a90> 140071000000000
>>> print(b, id(b))
<__main__.Example object at 0x7f8b8c0b7a90> 140071000000000
"""
_instances = {}
def __call__(cls, *args, **kwargs):
# Look up whether an instance of this cls has already been created
if cls not in cls._instances:
# If not, we let a new instance be created
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
|
meerkat-main
|
meerkat/tools/singleton.py
|
import abc
class AbstractWriter(abc.ABC):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __repr__(self):
return f"{self.__class__.__name__}"
def __str__(self):
return f"{self.__class__.__name__}"
@abc.abstractmethod
def open(self, *args, **kwargs) -> None:
return NotImplemented
@abc.abstractmethod
def write(self, data, *args, **kwargs) -> None:
return NotImplemented
@abc.abstractmethod
def flush(self, *args, **kwargs) -> None:
return NotImplemented
@abc.abstractmethod
def close(self, *args, **kwargs) -> None:
return NotImplemented
@abc.abstractmethod
def finalize(self, *args, **kwargs) -> None:
return NotImplemented
|
meerkat-main
|
meerkat/writers/abstract.py
|
from meerkat.columns.abstract import Column
from meerkat.writers.abstract import AbstractWriter
class ConcatWriter(AbstractWriter):
def __init__(
self,
output_type: type = Column,
template: Column = None,
*args,
**kwargs,
):
super(ConcatWriter, self).__init__(*args, **kwargs)
self.output_type = output_type
self.template = template
def open(self) -> None:
self.outputs = []
def write(self, data, **kwargs) -> None:
# convert to Meerkat column if not already
if self.template is not None:
if isinstance(data, Column):
data = data.data
data = self.template._clone(data=data)
elif not isinstance(data, Column):
data = self.output_type(data)
self.outputs.append(data)
def flush(self):
pass
def close(self, *args, **kwargs):
pass
def finalize(self, *args, **kwargs) -> None:
from meerkat.ops.concat import concat
return concat(self.outputs)
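# Hypothetical usage sketch (not part of meerkat): a ConcatWriter buffers the
# output of each batch and concatenates everything once in `finalize`.
# `SomeColumnType` and `batch_outputs` are illustrative placeholders.
# writer = ConcatWriter(output_type=SomeColumnType)
# writer.open()
# for batch_output in batch_outputs:
#     writer.write(batch_output)
# column = writer.finalize()  # a single column containing all batches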
|
meerkat-main
|
meerkat/writers/concat_writer.py
|
meerkat-main
|
meerkat/writers/__init__.py
|
|
import os
from pathlib import Path
from numpy.lib.format import open_memmap
from meerkat.columns.abstract import Column
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.writers.abstract import AbstractWriter
class NumpyMemmapWriter(AbstractWriter):
def __init__(
self,
path: str = None,
dtype: str = "float32",
mode: str = "r",
shape: tuple = None,
output_type: type = NumPyTensorColumn,
template: Column = None,
*args,
**kwargs,
):
super(NumpyMemmapWriter, self).__init__(*args, **kwargs)
# File used to store data
self.file = None
# Location of the pointer
self._pointer = 0
# If `path` is specified
self.path = path
self.dtype = dtype
self.shape = shape
if path is not None:
self.open(path=path, dtype=dtype, mode=mode, shape=shape)
self.output_type = output_type
self.template = template
def open(
self,
path: str,
dtype: str = "float32",
mode: str = "w+",
shape: tuple = None,
) -> None:
assert shape is not None, "Must specify `shape`."
# Make all dirs to path
os.makedirs(str(Path(path).absolute().parent), exist_ok=True)
# Open the file as a memmap
self.file = open_memmap(path, dtype=dtype, mode=mode, shape=shape)
self._pointer = 0
self.path = path
self.shape = shape
def write(self, arr, **kwargs) -> None:
self.file[self._pointer : self._pointer + len(arr)] = arr
self._pointer += len(arr)
def flush(self):
"""Close the mmap file and reopen to release memory."""
self.file.flush()
self.file.base.close()
# 'r+': open the existing file for reading and writing.
self.file = open_memmap(self.file.filename, mode="r+")
def finalize(self, *args, **kwargs) -> Column:
self.flush()
data = self.file
if self.template is not None:
if isinstance(data, Column):
data = data.data
data = self.template._clone(data=data)
else:
data = self.output_type(data)
return data
def close(self, *args, **kwargs) -> None:
pass
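# Hypothetical usage sketch (not part of meerkat): the writer streams rows into
# a numpy memmap on disk and wraps it in a column at the end. The path below is
# illustrative only.
def _demo_numpy_memmap_writer():
    import numpy as np

    writer = NumpyMemmapWriter()
    writer.open(path="/tmp/demo_embeddings.npy", dtype="float32", shape=(4, 2))
    writer.write(np.zeros((2, 2), dtype="float32"))  # fills rows 0-1
    writer.write(np.ones((2, 2), dtype="float32"))   # fills rows 2-3
    return writer.finalize()  # a NumPyTensorColumn backed by the memmap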
|
meerkat-main
|
meerkat/writers/numpy_writer.py
|
from typing import Callable, Union
class AggregationError(ValueError):
pass
class AggregateMixin:
AGGREGATIONS = [
"mean",
]
def __init__(self, *args, **kwargs):
super().__init__()
def aggregate(self, function: Union[Callable, str], *args, **kwargs):
if isinstance(function, str):
if function not in self.AGGREGATIONS:
raise ValueError(f"{function} is not a valid aggregation")
return getattr(self, function)(*args, **kwargs)
else:
return function(self, *args, **kwargs)
def mean(self, *args, **kwargs):
raise AggregationError(
f"Aggregation 'mean' not implemented for column of type {type(self)}."
)
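# Hypothetical sketch (not part of meerkat): a class mixing in AggregateMixin
# dispatches string aggregations to its own methods and passes callables
# through unchanged. `_DemoColumn` is illustrative only.
class _DemoColumn(AggregateMixin):
    def __init__(self, values):
        super().__init__()
        self.values = values

    def mean(self):
        return sum(self.values) / len(self.values)
# _DemoColumn([1, 2, 3]).aggregate("mean") -> 2.0
# _DemoColumn([1, 2, 3]).aggregate(lambda col: max(col.values)) -> 3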
|
meerkat-main
|
meerkat/mixins/aggregate.py
|
from collections.abc import Callable
from typing import List
def identity_collate(batch: List):
return batch
class CollateMixin:
def __init__(self, collate_fn: Callable = None, *args, **kwargs):
super(CollateMixin, self).__init__(*args, **kwargs)
if collate_fn is not None:
self._collate_fn = collate_fn
else:
self._collate_fn = identity_collate
@property
def collate_fn(self):
"""Method used to collate."""
return self._collate_fn
@collate_fn.setter
def collate_fn(self, value):
self._collate_fn = value
def collate(self, *args, **kwargs):
"""Collate data."""
return self._collate_fn(*args, **kwargs)
|
meerkat-main
|
meerkat/mixins/collate.py
|
from collections.abc import Callable, Mapping, Sequence
from types import SimpleNamespace
import numpy as np
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
class FunctionInspectorMixin:
def __init__(self, *args, **kwargs):
super(FunctionInspectorMixin, self).__init__(*args, **kwargs)
def _inspect_function(
self,
function: Callable,
with_indices: bool = False,
is_batched_fn: bool = False,
data=None,
indices=None,
materialize=True,
**kwargs
) -> SimpleNamespace:
# Initialize variables to track
no_output = dict_output = bool_output = list_output = False
# If dict_output = True and `function` is used for updating the `DataFrame`
# useful to know if any existing column is modified
updates_existing_column = True
existing_columns_updated = []
# Record the shape and dtype of the output
output_shape = output_dtype = None
# Run the function to test it
if data is None:
if is_batched_fn:
data = self[:2] if materialize else self[:2]
else:
data = self[0] if materialize else self[0]
if indices is None:
if is_batched_fn:
indices = range(2)
else:
indices = 0
if with_indices and is_batched_fn:
output = function(data, indices, **kwargs)
elif with_indices and not is_batched_fn:
output = function(data, indices, **kwargs)
else:
output = function(data, **kwargs)
# lazy import to avoid circular dependency
from meerkat.columns.abstract import Column
from meerkat.columns.tensor.torch import TorchTensorColumn
if isinstance(output, Mapping):
# `function` returns a dict output
dict_output = True
# Check if `self` is a `DataFrame`
if hasattr(self, "all_columns"):
# Set of columns that are updated
existing_columns_updated = set(self.all_columns).intersection(
set(output.keys())
)
# Check if `function` updates an existing column
if len(existing_columns_updated) == 0:
updates_existing_column = False
elif output is None:
# `function` returns None
no_output = True
elif (
isinstance(output, (bool, np.bool_))
or (
isinstance(output, (np.ndarray, TorchTensorColumn))
and output.dtype == bool
)
or (
isinstance(output, (torch.Tensor, TorchTensorColumn))
and output.dtype == torch.bool
)
):
# `function` returns a bool
bool_output = True
elif isinstance(output, (Sequence, Column, torch.Tensor, np.ndarray)):
# `function` returns a list
list_output = True
if is_batched_fn and (
isinstance(output[0], (bool, np.bool_))
or (isinstance(output[0], np.ndarray) and (output[0].dtype == bool))
or (
isinstance(output[0], torch.Tensor)
and (output[0].dtype == torch.bool)
)
):
# `function` returns a bool per example
bool_output = True
if not isinstance(output, Mapping) and not isinstance(output, bool):
# Record the shape of the output
if hasattr(output, "shape"):
output_shape = output.shape
elif len(output) > 0 and hasattr(output[0], "shape"):
output_shape = output[0].shape
# Record the dtype of the output
if hasattr(output, "dtype"):
output_dtype = output.dtype
elif len(output) > 0 and hasattr(output[0], "dtype"):
output_dtype = output[0].dtype
return SimpleNamespace(
output=output,
dict_output=dict_output,
no_output=no_output,
bool_output=bool_output,
list_output=list_output,
updates_existing_column=updates_existing_column,
existing_columns_updated=existing_columns_updated,
output_shape=output_shape,
output_dtype=output_dtype,
)
|
meerkat-main
|
meerkat/mixins/inspect_fn.py
|
import os
import dill
from meerkat.tools.utils import dump_yaml, load_yaml, meerkat_dill_load
class ColumnIOMixin:
def write(self, path: str, *args, **kwargs) -> None:
assert hasattr(self, "_data"), f"{self.__class__.__name__} requires `self.data`"
# Make all the directories to the path
os.makedirs(path, exist_ok=True)
metadata = self._get_meta()
# Write the state
state = self._get_state()
metadata["state"] = state
self._write_data(path, *args, **kwargs)
# Save the metadata as a yaml file
metadata_path = os.path.join(path, "meta.yaml")
dump_yaml(metadata, metadata_path)
return metadata
def _get_meta(self):
return {
"dtype": type(self),
"len": len(self),
**self.metadata,
}
def _write_data(self, path):
data_path = os.path.join(path, "data.dill")
dill.dump(self.data, open(data_path, "wb"))
def _write_state(self, path):
state = self._get_state()
state_path = os.path.join(path, "state.dill")
dill.dump(state, open(state_path, "wb"))
@classmethod
def read(
cls, path: str, _data: object = None, _meta: object = None, *args, **kwargs
) -> object:
# Load in the metadata
meta = (
dict(load_yaml(os.path.join(path, "meta.yaml"))) if _meta is None else _meta
)
col_type = meta["dtype"]
# Load states
if "state" not in meta:
assert os.path.exists(path), f"`path` {path} does not exist."
try:
state = col_type._read_state(path)
except Exception:
state = None
else:
state = meta["state"]
data = col_type._read_data(path, *args, **kwargs) if _data is None else _data
if state is None:
# KG, Sabri: this `if-else` exists only for backwards compatibility
# and should be removed in the future; once backwards compatibility
# is dropped, this if statement will not be required.
col = col_type(data)
else:
col = col_type.__new__(col_type)
col._set_state(state)
col._set_data(data)
from meerkat.interactive.formatter import DeprecatedFormatter
if "_formatters" not in col.__dict__ or isinstance(
col.formatters, DeprecatedFormatter
):
# FIXME: make deprecated above work with new formatters
# PATCH: backwards compatibility patch for old dataframes
# saved before v0.2.4
col.formatters = col._get_default_formatters()
return col
@staticmethod
def _read_state(path: str):
return meerkat_dill_load(os.path.join(path, "state.dill"))
@staticmethod
def _read_data(path: str, *args, **kwargs):
return meerkat_dill_load(os.path.join(path, "data.dill"))
|
meerkat-main
|
meerkat/mixins/io.py
|
import logging
from typing import TYPE_CHECKING, Callable, Mapping, Sequence, Type, Union
from pandas.util._decorators import doc
from meerkat.ops.map import defer
if TYPE_CHECKING:
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredColumn
from meerkat.dataframe import DataFrame
logger = logging.getLogger(__name__)
class DeferrableMixin:
def __init__(self, *args, **kwargs):
super(DeferrableMixin, self).__init__(*args, **kwargs)
@doc(defer, data="self")
def defer(
self,
function: Callable,
is_batched_fn: bool = False,
batch_size: int = 1,
inputs: Union[Mapping[str, str], Sequence[str]] = None,
outputs: Union[Mapping[any, str], Sequence[str]] = None,
output_type: Union[Mapping[str, Type["Column"]], Type["Column"]] = None,
materialize: bool = True,
) -> Union["DataFrame", "DeferredColumn"]:
return defer(
data=self,
function=function,
is_batched_fn=is_batched_fn,
batch_size=batch_size,
inputs=inputs,
outputs=outputs,
output_type=output_type,
materialize=materialize,
)
|
meerkat-main
|
meerkat/mixins/deferable.py
|
import inspect
import warnings
from typing import Any
from meerkat.interactive.graph.marking import (
is_unmarked_context,
is_unmarked_fn,
unmarked,
)
class MarkableMixin:
"""A class that can be marked."""
_self_marked: bool = False
@property
def marked(self):
return self._self_marked
def mark(self):
"""Mark this object.
When marked, this object will trigger a reactive function when
passed in as an argument.
"""
self._self_marked = True
return self
def unmark(self):
"""Unmark this object.
When unmarked, this object will not trigger a reactive function
when passed in as an argument.
"""
self._self_marked = False
return self
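# Hypothetical sketch (not part of meerkat): marking toggles whether an object
# triggers reactive functions; `mark`/`unmark` return `self`, so calls chain.
# `_DemoMarkable` is illustrative only.
class _DemoMarkable(MarkableMixin):
    pass
# _DemoMarkable().mark().marked            -> True
# _DemoMarkable().mark().unmark().marked   -> False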
class ReactifiableMixin(MarkableMixin):
"""A class that reactifies all attribute accesses.
When an attribute accessor is "reactified" and is called within
a :cls:`mk.gui.react` context, the attribute access is added
to the graph. This means that if the object changes, the attribute
will be fetched again.
Outside of this context, the method will not add operations or their
outputs to the graph.
"""
# TODO: Clean and investigate failure points of this function.
def __getattribute__(self, name: str) -> Any:
from meerkat.interactive.graph.reactivity import is_reactive_fn, reactive
# We assume accessing the attribute twice will not result in different values.
# We don't explicitly check for this because it is expensive.
with unmarked():
is_self_marked = super().__getattribute__("_self_marked")
attr = super().__getattribute__(name)
is_method_or_fn = inspect.ismethod(attr) or inspect.isfunction(attr)
attr_is_reactive_fn = is_method_or_fn and is_reactive_fn(attr)
attr_is_unmarked_fn = is_method_or_fn and is_unmarked_fn(attr)
if is_method_or_fn:
if name in ["_reactive_warning", "_set_data"]:
# There are some functions we never want to wrap because the function
# needs access to the current reactive state (i.e. is_reactive()).
# These functions should all be declared here.
# For example, `_reactive_warning` needs to know if we are in a
# reactive context, in order to determine if we should raise a warning.
return attr
if name in ["mark", "unmark", "attach_to_inode"]:
# Regardless of whether `self` is marked or not, these methods should
# never be wrapped in @reactive or run any statements inside them
# reactively. All such methods should be declared here.
# So far, we include methods in MarkableMixin and NodeMixin.
# Therefore, we explicitly wrap them in @unmarked, so that they can
# never be reactive.
return unmarked()(attr)
if not attr_is_reactive_fn and not attr_is_unmarked_fn:
# By default, all methods of a ReactifiableMixin are unmarked.
# This makes it cleaner because the developer only has to decorate
# methods with @reactive if they want them to be reactive.
# The alternative here would be to do nothing - i.e. to leave methods
# that are undecorated, undecorated. However, this has some
# consequences: particularly, that code inside the method could still
# be reactive if the method was passed in a marked argument.
# Here's an example:
#
# def foo(self, x: int):
# # this will be reactive when a is marked, because
# # properties are reactive
# a = self.property
# # this will add an operation to the graph if x is marked
# b = reactive(fn)(x)
#
# By using the @unmarked decorator, we can avoid random lines of code
# becoming operations on the graph and being rerun - i.e. the method
# behaves like a single unit, rather than separate lines of code.
# For methods of a class, this makes sense because the user should
# not be privy to the internals of the method. They should only
# see what they put in and what comes out - so the method effectively
# should behave like a single black box.
#
# The user can always define their own reactive function that wraps a
# method like `foo` and make it reactive!
return unmarked()(attr)
return attr
# TODO: Handle functions that are stored as attributes.
# These functions should be wrapped in reactive when the object is reactive.
# For ordinary attributes, we need to check if reactive=True.
# FIXME: We may be able to get rid of this distinction by decorating
# Store.__call__ with @reactive.
if (
# Don't do anything when the object is not marked, it should behave
# as normal in that case.
is_self_marked
and not is_unmarked_context()
# Ignore private attributes.
and not name.startswith("_")
# Ignore dunder attributes.
and not name.startswith("__")
# Ignore all node-related attributes. These should never be accessed
# in a reactive way.
# NOTE: There seems to be some dependence between NodeMixin and this class.
# Specially, anything that is a ReactifiableMixin should also be a
# NodeMixin. Consider having this class inherit from NodeMixin.
and name
not in ("_self_inode", "inode", "inode_id", "_self_marked", "marked", "id")
):
# Only build the function if we are in a reactive context.
# TODO: Cache this function so that it is faster.
def _fn(_obj):
return super().__getattribute__(name)
_fn.__name__ = name
_fn = reactive(_fn, nested_return=False)
return _fn(self)
else:
return attr
def _reactive_warning(self, name, placeholder="obj"):
if not is_unmarked_context() and self.marked:
warnings.warn(
f"Calling {name}({placeholder}) is not reactive. "
f"Use `mk.{name}({placeholder})` to get"
"a reactive variable (i.e. a Store). "
f"`mk.{name}({placeholder})` behaves exactly"
f"like {name}({placeholder}) outside of this difference."
)
"""
We include the following dunder methods as examples of how to correctly
implement them when using this Mixin.
"""
def __len__(self):
with unmarked():
out = super().__len__()
self._reactive_warning("len")
return out
def __int__(self):
with unmarked():
out = super().__int__()
self._reactive_warning("int")
return out
def __long__(self):
with unmarked():
out = super().__long__()
self._reactive_warning("long")
return out
def __float__(self):
with unmarked():
out = super().__float__()
self._reactive_warning("float")
return out
def __complex__(self):
with unmarked():
out = super().__complex__()
self._reactive_warning("complex")
return out
def __oct__(self):
with unmarked():
out = super().__oct__()
self._reactive_warning("oct")
return out
def __hex__(self):
with unmarked():
out = super().__hex__()
self._reactive_warning("hex")
return out
|
meerkat-main
|
meerkat/mixins/reactifiable.py
|
meerkat-main
|
meerkat/mixins/__init__.py
|
|
import os
from pathlib import Path
from typing import Sequence, Union
PathLikeType = Union[str, Path, os.PathLike]
PathLike = (str, Path, os.PathLike)
class FileMixin:
"""Mixin for adding in single filepath."""
# TODO(karan): this also actually works on dirs, so rename
def __init__(self, filepath: Union[str, Path], *args, **kwargs):
super(FileMixin, self).__init__(*args, **kwargs)
if isinstance(filepath, str):
filepath = Path(filepath)
# Assign the path
self.filepath = filepath
def __getattr__(self, item):
try:
return getattr(self.filepath, item)
except AttributeError:
raise AttributeError(f"Attribute {item} not found.")
class PathsMixin:
"""Mixin for adding in generic paths."""
def __init__(
self,
paths: Union[PathLikeType, Sequence[PathLikeType]],
*args,
**kwargs,
):
super(PathsMixin, self).__init__(*args, **kwargs)
if isinstance(paths, PathLike):
paths = [Path(paths)]
elif not isinstance(paths, str) and isinstance(paths, Sequence):
paths = [Path(p) for p in paths]
else:
raise NotImplementedError
# Assign the path
# TODO: make this a property
self.paths = paths
def __getattr__(self, item):
try:
return [getattr(p, item) for p in self.paths]
except AttributeError:
raise AttributeError(f"Attribute {item} not found.")
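# Hypothetical sketch (not part of meerkat): attribute access on a FileMixin
# falls through to the underlying pathlib.Path, so path helpers come for free.
# The path and class name below are illustrative only.
class _DemoFile(FileMixin):
    pass
# _DemoFile("/tmp/example.txt").suffix  -> ".txt"
# _DemoFile("/tmp/example.txt").name    -> "example.txt"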
|
meerkat-main
|
meerkat/mixins/file.py
|
from meerkat.block.abstract import BlockView
class BlockableMixin:
def __init__(self, *args, **kwargs):
super(BlockableMixin, self).__init__(*args, **kwargs)
block_class: type = None
@classmethod
def is_blockable(cls):
return cls.block_class is not None
def _unpack_block_view(self, data):
if isinstance(data, BlockView):
self._block = data.block
self._block_index = data.block_index
data = data.data
else:
block_view: BlockView = self.block_class.from_column_data(data)
self._block, self._block_index = block_view.block, block_view.block_index
data = block_view.data
return data
def _pack_block_view(self):
return BlockView(block_index=self._block_index, block=self._block)
def run_block_method(self, method: str, *args, **kwargs):
result = getattr(self._block.subblock([self._block_index]), method)(
*args, **kwargs
)
return result[self._block_index]
|
meerkat-main
|
meerkat/mixins/blockable.py
|
from uuid import uuid4
from meerkat.tools.utils import classproperty
_MK_ID_PREFIX = "__mkid__"
class IdentifiableMixin:
"""Mixin for classes, to give objects an id.
This class must use _self_{attribute} for all attributes since it
will be mixed into the wrapt.ObjectProxy class, which requires this
naming convention for it to work.
"""
_self_identifiable_group: str
def __init__(self, id: str = None, *args, **kwargs):
super(IdentifiableMixin, self).__init__(*args, **kwargs)
self._set_id(id=id)
@property
def id(self):
return self._self_id
# Note: this is set to be a classproperty, so that we can access either
# cls.identifiable_group or self.identifiable_group.
# If this is changed to a property, then we can only access
# self.identifiable_group, and cls._self_identifiable_group but not
# cls.identifiable_group. This is fine if something breaks, but
# be careful to change cls.identifiable_group to
# cls._self_identifiable_group everywhere.
@classproperty
def identifiable_group(self):
return self._self_identifiable_group
def _set_id(self, id: str = None):
# get uuid as str
if id is None:
self._self_id = _MK_ID_PREFIX + uuid4().hex
else:
# TODO: Have objects that need special treatment for being
# detected in endpoint (e.g. Store, dataframes) implement their
# own set id method to assert that id is a meerkat id.
# assert is_meerkat_id(id)
self._self_id = id
from meerkat.state import state
state.identifiables.add(self)
@classmethod
def from_id(cls, id: str):
# TODO(karan): make sure we're using this everywhere and it's not
# being redefined in subclasses
from meerkat.state import state
return state.identifiables.get(id=id, group=cls.identifiable_group)
@staticmethod
def prepend_meerkat_id_prefix(id: str) -> str:
return _MK_ID_PREFIX + id
def is_meerkat_id(id: str) -> bool:
return id.startswith(_MK_ID_PREFIX)
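# Hypothetical sketch (not part of meerkat): subclasses declare the group their
# ids live in; `from_id` looks instances back up in the global meerkat state.
# The class and group names below are illustrative only.
# class _DemoIdentifiable(IdentifiableMixin):
#     _self_identifiable_group = "demo_objects"
#
# obj = _DemoIdentifiable()
# obj.id.startswith(_MK_ID_PREFIX)           # True: ids get the meerkat prefix
# _DemoIdentifiable.from_id(obj.id) is obj   # retrieved from meerkat.state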
|
meerkat-main
|
meerkat/mixins/identifiable.py
|
from collections.abc import Mapping
class MaterializationMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def mz(self):
return _MaterializeIndexer(self)
class _MaterializeIndexer:
def __init__(self, obj: object):
self.obj = obj
def __getitem__(self, index):
return self.obj._get(index, materialize=True)
def __len__(self):
return len(self.obj)
@property
def loc(self):
return _LocIndexer(self.obj, materialize=True)
class _LocIndexer:
def __init__(self, obj: object, materialize: bool = False):
self.obj = obj
self.materialize = materialize
def __getitem__(self, keyidx):
return self.obj._get_loc(keyidx, materialize=self.materialize)
def __setitem__(
self,
index,
value,
):
if isinstance(index, tuple):
keyidx, column = index
self.obj._set_loc(keyidx=keyidx, column=column, value=value)
else:
if isinstance(value, Mapping):
raise ValueError(
"Must pass mapping if column is not specified on __setitem__."
)
for column in self.obj.columns:
self.obj._set_loc(
keyidx=index,
column=column,
value=value[column],
)
def __len__(self):
return len(self.obj)
@property
def mz(self):
return _LocIndexer(self.obj, materialize=True)
class IndexerMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def loc(self):
return _LocIndexer(self)
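# Hypothetical usage sketch (not part of meerkat): `.loc` dispatches to
# `_get_loc` for key-based lookup, while `.mz` routes through `_get`/`_get_loc`
# with materialize=True. `df` below is an illustrative object with both mixins.
# row = df.loc["some_key"]       # key-based lookup
# cell = df.mz[0]                # positional lookup, materialized
# cell = df.mz.loc["some_key"]   # key-based lookup, materialized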
|
meerkat-main
|
meerkat/mixins/indexing.py
|
import logging
from typing import Callable, Dict, Mapping, Optional, Union
import numpy as np
from tqdm.auto import tqdm
from meerkat.provenance import capture_provenance
logger = logging.getLogger(__name__)
class MappableMixin:
def __init__(self, *args, **kwargs):
super(MappableMixin, self).__init__(*args, **kwargs)
@capture_provenance()
def map(
self,
function: Optional[Callable] = None,
with_indices: bool = False,
is_batched_fn: bool = False,
batch_size: Optional[int] = 1,
drop_last_batch: bool = False,
num_workers: Optional[int] = 0,
output_type: Union[type, Dict[str, type]] = None,
materialize: bool = True,
pbar: bool = False,
mmap: bool = False,
mmap_path: str = None,
flush_size: int = None,
**kwargs,
):
from meerkat.columns.abstract import Column
from meerkat.dataframe import DataFrame
"""Map a function over the elements of the column."""
# Return if `self` has no examples
if not len(self):
logger.info("Dataset empty, returning None.")
return None
if not is_batched_fn:
# Convert to a batch function
function = self._convert_to_batch_fn(
function, with_indices=with_indices, materialize=materialize, **kwargs
)
is_batched_fn = True
logger.info(f"Converting `function` {function} to a batched function.")
# Run the map
logger.info("Running `map`, the dataset will be left unchanged.")
for i, batch in tqdm(
enumerate(
self.batch(
batch_size=batch_size,
drop_last_batch=drop_last_batch,
num_workers=num_workers,
materialize=materialize
# TODO: collate=batched was commented out in list_column
)
),
total=(len(self) // batch_size)
+ int(not drop_last_batch and len(self) % batch_size != 0),
disable=not pbar,
):
# Calculate the start and end indexes for the batch
start_index = i * batch_size
end_index = min(len(self), (i + 1) * batch_size)
# Use the first batch for setup
if i == 0:
# Get some information about the function
function_properties = self._inspect_function(
function,
with_indices,
is_batched_fn,
batch,
range(start_index, end_index),
materialize=materialize,
**kwargs,
)
# Pull out information
output = function_properties.output
dtype = function_properties.output_dtype
is_mapping = isinstance(output, Mapping)
is_type_mapping = isinstance(output_type, Mapping)
if not is_mapping and is_type_mapping:
raise ValueError(
"output_type is a mapping but function output is not a mapping"
)
writers = {}
for key, curr_output in (
output.items() if is_mapping else [("0", output)]
):
curr_output_type = (
type(Column.from_data(curr_output))
if output_type is None
or (is_type_mapping and key not in output_type.keys())
else output_type[key]
if is_type_mapping
else output_type
)
writer = curr_output_type.get_writer(
mmap=mmap,
template=(
curr_output.copy()
if isinstance(curr_output, Column)
else None
),
)
# Setup for writing to a certain output column
# TODO: support optionally memmapping only some columns
if mmap:
if not hasattr(curr_output, "shape"):
curr_output = np.array(curr_output)
# Assumes first dimension of output is the batch dimension.
shape = (len(self), *curr_output.shape[1:])
# Construct the mmap file path
if mmap_path is None:
mmap_path = self.logdir / f"{hash(function)}" / key
# Open the output writer
writer.open(str(mmap_path), dtype, shape=shape)
else:
# Create an empty dict or list for the outputs
writer.open()
writers[key] = writer
else:
# Run `function` on the batch
output = (
function(
batch,
range(i * batch_size, min(len(self), (i + 1) * batch_size)),
**kwargs,
)
if with_indices
else function(batch, **kwargs)
)
# Append the output
if output is not None:
if isinstance(output, Mapping):
if set(output.keys()) != set(writers.keys()):
raise ValueError(
"Map function must return same keys for each batch."
)
for k, writer in writers.items():
writer.write(output[k])
else:
writers["0"].write(output)
# intermittently flush
if flush_size is not None and ((i + 1) % flush_size == 0):
for writer in writers.values():
writer.flush()
# Check if we are returning a special output type
outputs = {key: writer.finalize() for key, writer in writers.items()}
if not is_mapping:
outputs = outputs["0"]
else:
# TODO (arjundd): This is duck typing. We should probably make this
# class signature explicit.
outputs = (
self._clone(data=outputs)
if isinstance(self, DataFrame)
else DataFrame.from_batch(outputs)
)
outputs._visible_columns = None
return outputs
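# Hypothetical usage sketch (not part of meerkat's source): `map` runs the
# function over batches and collects outputs with each column's writer. The
# column construction assumes an `mk.ScalarColumn` constructor is available.
# import meerkat as mk
# col = mk.ScalarColumn([1, 2, 3, 4])
# out = col.map(
#     lambda batch: {"doubled": batch * 2}, is_batched_fn=True, batch_size=2
# )  # dict outputs are assembled into a DataFrame with a "doubled" column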
|
meerkat-main
|
meerkat/mixins/mapping.py
|
from dataclasses import dataclass
from meerkat.interactive.node import NodeMixin
from meerkat.mixins.blockable import BlockableMixin
from meerkat.mixins.identifiable import IdentifiableMixin
from meerkat.provenance import ProvenanceMixin
@dataclass
class StateClass:
"""An internal class to store the state of an object alongside its
associated class."""
klass: type
state: object
class CloneableMixin:
def __init__(self, *args, **kwargs):
super(CloneableMixin, self).__init__(*args, **kwargs)
@classmethod
def _state_keys(cls) -> set:
""""""
raise NotImplementedError()
@classmethod
def _clone_keys(cls) -> set:
return set()
def copy(self, **kwargs) -> object:
new_data = self._copy_data()
return self._clone(data=new_data)
def view(self) -> object:
return self._clone()
def _clone(self, data: object = None, **kwargs):
if data is None:
if isinstance(self, BlockableMixin) and self.is_blockable():
data = self._pack_block_view()
else:
data = self._view_data()
state = self._get_state(clone=True)
state.update(kwargs)
obj = self.__class__.__new__(self.__class__)
obj._set_state(state)
obj._set_data(data)
if isinstance(self, ProvenanceMixin):
# need to create a node for the object
obj._init_node()
if isinstance(self, IdentifiableMixin):
obj._set_id()
if isinstance(self, NodeMixin):
obj._set_inode()
# obj._set_children()
from meerkat.dataframe import DataFrame
if isinstance(obj, DataFrame):
if obj.primary_key_name not in obj:
# need to reset the primary key if we remove the column
obj.set_primary_key(None, inplace=True)
return obj
def _copy_data(self) -> object:
raise NotImplementedError
def _view_data(self) -> object:
return self.data
def _get_state(self, clone: bool = False) -> dict:
state = {key: getattr(self, key) for key in self._state_keys()}
if clone:
state.update({key: getattr(self, key) for key in self._clone_keys()})
return state
def _set_state(self, state: dict):
self.__dict__.update(state)
|
meerkat-main
|
meerkat/mixins/cloneable.py
|
import json
import os
from abc import ABC, abstractmethod
from typing import List
import meerkat as mk
from ..config import DATASETS_ENV_VARIABLE
from .info import DatasetInfo
from .utils import download_url
class DatasetBuilder(ABC):
VERSIONS: List[str]
info: DatasetInfo = None
def __init__(
self,
dataset_dir: str = None,
version: str = None,
download_mode: str = "reuse",
**kwargs,
):
self.name = self.__class__.__name__
self.version = self.VERSIONS[0] if version is None else version
self.download_mode = download_mode
self.kwargs = kwargs
if dataset_dir is None:
self.dataset_dir = self._get_dataset_dir(self.name, self.version)
self.var_dataset_dir = os.path.join(
f"${DATASETS_ENV_VARIABLE}", self.name, self.version
)
else:
self.dataset_dir = dataset_dir
self.var_dataset_dir = dataset_dir
def download_url(self, url: str):
return download_url(url, self.dataset_dir, force=self.download_mode == "force")
def dump_download_meta(self):
data = {
"name": self.name,
"version": self.version,
"dataset_dir": self.dataset_dir,
}
json.dump(
data,
open(os.path.join(self.dataset_dir, "meerkat_download.json"), "w"),
)
def __call__(self):
if self.download_mode in ["force", "extract"] or (
self.download_mode == "reuse" and not self.is_downloaded()
):
try:
self.download()
self.dump_download_meta()
except Exception as e:
raise ValueError(
f"Failed to download dataset {self.name} to {self.dataset_dir}."
f"Error: {e}"
f"Try setting `download_mode='force'` to force re-download."
)
if not self.is_downloaded():
raise ValueError(
f"Dataset {self.name} is not downloaded to {self.dataset_dir}."
)
return self.build()
@abstractmethod
def build(self):
raise NotImplementedError()
@abstractmethod
def download(self):
raise NotImplementedError()
def is_downloaded(self) -> bool:
"""This is a very weak check for the existence of the dataset.
Subclasses should ideally implement more thorough checks.
"""
return os.path.exists(self.dataset_dir)
@staticmethod
def _get_dataset_dir(name: str, version: str) -> str:
return os.path.join(mk.config.datasets.root_dir, name, version)
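# Hypothetical sketch (not part of meerkat): a minimal builder declares its
# versions and implements `download` and `build`. The URL and file name are
# illustrative only.
class _DemoBuilder(DatasetBuilder):
    VERSIONS = ["1.0"]

    def download(self):
        import shutil

        path = self.download_url("https://example.com/demo.csv")  # illustrative URL
        shutil.copy(path, os.path.join(self.dataset_dir, "demo.csv"))

    def build(self):
        return mk.DataFrame.from_csv(os.path.join(self.dataset_dir, "demo.csv"))
# `_DemoBuilder()()` downloads the data (if needed) and returns the built
# DataFrame via `DatasetBuilder.__call__`.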
|
meerkat-main
|
meerkat/datasets/abstract.py
|
import functools
from typing import Any, List, Optional, Sequence
from fvcore.common.registry import Registry as _Registry
from tabulate import tabulate
from meerkat.dataframe import DataFrame
class Registry(_Registry):
"""Extension of fvcore's registry that supports aliases."""
_ALIAS_KEYWORDS = ("_aliases", "_ALIASES")
def __init__(self, name: str):
super().__init__(name=name)
self._metadata_map = {}
def get(self, name: str, **kwargs) -> Any:
ret = self._obj_map.get(name)
if ret is None:
raise KeyError(
"No object named '{}' found in '{}' registry!".format(name, self._name)
)
return ret(**kwargs)()
def get_obj(self, name: str) -> type:
return self._obj_map[name]
def _get_aliases(self, obj_func_or_class):
for kw in self._ALIAS_KEYWORDS:
if hasattr(obj_func_or_class, kw):
return getattr(obj_func_or_class, kw)
return []
def register(
self, obj: object = None, aliases: Sequence[str] = None
) -> Optional[object]:
if obj is None:
# used as a decorator
def deco(func_or_class: object, aliases=None) -> object:
name = func_or_class.__name__ # pyre-ignore
self._do_register(name, func_or_class)
if aliases is None:
aliases = self._get_aliases(func_or_class)
if not isinstance(aliases, (list, tuple, set)):
aliases = [aliases]
for alias in aliases:
self._do_register(alias, func_or_class)
return func_or_class
kwargs = {"aliases": aliases}
if any(v is not None for v in kwargs.values()):
return functools.partial(deco, **kwargs)
else:
return deco
name = obj.__name__ # pyre-ignore
self._do_register(name, obj)
if aliases is None:
aliases = self._get_aliases(obj)
for alias in aliases:
self._do_register(alias, obj)
def _do_register(self, name: str, obj: Any, **kwargs) -> None:
self._metadata_map[name] = {"name": name, "description": obj.__doc__, **kwargs}
return super()._do_register(name, obj)
@property
def names(self) -> List[str]:
return list(self._obj_map.keys())
@property
def catalog(self) -> DataFrame:
rows = []
for name, builder in self:
rows.append(builder.info.__dict__)
return DataFrame(rows)
def __repr__(self) -> str:
table = tabulate(self._metadata_map.values(), tablefmt="fancy_grid")
return "Registry of {}:\n".format(self._name) + table
datasets = Registry("datasets")
datasets.__doc__ = """Registry for datasets in meerkat"""
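# Hypothetical sketch (not part of meerkat): builders register themselves with
# the decorator and can expose extra aliases. `my_dataset` and "my_ds" are
# illustrative only.
# @datasets.register(aliases=["my_ds"])
# class my_dataset(DatasetBuilder):
#     ...
#
# datasets.get_obj("my_ds") is my_dataset   # both names resolve to the builder
# datasets.names                            # every registered name and alias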
|
meerkat-main
|
meerkat/datasets/registry.py
|
from typing import Dict, List, Union
from meerkat.dataframe import DataFrame
from .celeba import celeba
from .coco import coco
from .expw import expw
from .fer import fer
from .gtsdb import gtsdb
from .imagenet import imagenet
from .imagenette import imagenette
from .lra import pathfinder
from .lvis import lvis
from .mimic_iii import mimic_iii
from .mirflickr import mirflickr
from .ngoa import ngoa
from .pascal import pascal
from .registry import datasets
from .rfw import rfw
from .siim_cxr import siim_cxr
from .stsd import stsd
from .torchaudio import yesno
__all__ = [
"celeba",
"imagenet",
"imagenette",
"mirflickr",
"pascal",
"lvis",
"mimic_iii",
"expw",
"fer",
"rfw",
"ngoa",
"coco",
"yesno",
"siim_cxr",
"pathfinder",
"gtsdb",
"stsd",
]
DOWNLOAD_MODES = ["force", "extract", "reuse", "skip"]
REGISTRIES = ["meerkat", "huggingface"]
def get(
name: str,
dataset_dir: str = None,
version: str = None,
download_mode: str = "reuse",
registry: str = None,
**kwargs,
) -> Union[DataFrame, Dict[str, DataFrame]]:
"""Load a dataset into .
Args:
name (str): Name of the dataset.
dataset_dir (str): The directory containing dataset data. Defaults to
`~/.meerkat/datasets/{name}`.
version (str): The version of the dataset. Defaults to `latest`.
download_mode (str): The download mode. Options are: "reuse" (default) will
download the dataset if it does not exist, "force" will download the dataset
even if it exists, "extract" will reuse any downloaded archives but
force extracting those archives, and "skip" will not download the dataset
if it doesn't yet exist. Defaults to `reuse`.
registry (str): The registry to use. If None, then checks each
supported registry in turn. Currently, supported registries
include `meerkat` and `huggingface`.
**kwargs: Additional arguments passed to the dataset.
"""
if download_mode not in DOWNLOAD_MODES:
raise ValueError(
f"Invalid download mode: {download_mode}."
f"Must pass one of {DOWNLOAD_MODES}"
)
if registry is None:
registry_order = REGISTRIES
else:
registry_order = [registry]
errors = []
for registry in registry_order:
if registry == "meerkat":
try:
dataset = datasets.get(
name=name,
dataset_dir=dataset_dir,
version=version,
download_mode=download_mode,
**kwargs,
)
return dataset
except KeyError as e:
errors.append(e)
elif registry == "huggingface":
try:
import datasets as hf_datasets
mapping = {
"force": hf_datasets.DownloadMode.FORCE_REDOWNLOAD,
"reuse": hf_datasets.DownloadMode.REUSE_DATASET_IF_EXISTS,
}
if download_mode == "skip":
raise ValueError(
"Download mode `skip` isn't supported for HuggingFace datasets."
)
# Add version argument if specified
if version is not None:
kwargs["name"] = version
dataset = DataFrame.from_huggingface(
path=name,
download_mode=mapping[download_mode],
cache_dir=dataset_dir,
**kwargs,
)
except FileNotFoundError as e:
errors.append(e)
else:
return dataset
else:
raise ValueError(
f"Invalid registry: {registry}. Must be one of {REGISTRIES}"
)
raise ValueError(
f"No dataset '{name}' found in registry. Errors:" + " ".join(errors)
)
def versions(name: str) -> List[str]:
"""Get the versions of a dataset. These can be passed to the ``version``
argument of the :func:`~meerkat.get` function.
Args:
name (str): Name of the dataset.
Returns:
List[str]: List of versions.
"""
return datasets.get_obj(name).VERSIONS
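# Hypothetical usage sketch: load a registered dataset and list the versions a
# builder exposes. "imagenette" is used purely as an example name; any name in
# `datasets.names` or on the HuggingFace hub works.
# df = get("imagenette", download_mode="reuse")
# versions("imagenette")  # -> the builder's VERSIONS list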
|
meerkat-main
|
meerkat/datasets/__init__.py
|
import os
import shutil
import tarfile
from meerkat import env
from meerkat.dataframe import DataFrame
from meerkat.tools.utils import requires
_IS_HF_AVAILABLE = env.package_available("huggingface_hub")
if _IS_HF_AVAILABLE:
import huggingface_hub
def download_url(url: str, dataset_dir: str, force: bool = False):
os.makedirs(dataset_dir, exist_ok=True)
from datasets.utils.file_utils import get_from_cache
return get_from_cache(
url, cache_dir=os.path.join(dataset_dir, "downloads"), force_download=force
)
def extract(path: str, dst: str, extractor: str = None):
from datasets.utils.extract import Extractor
from datasets.utils.filelock import FileLock
# Prevent parallel extractions
lock_path = path + ".lock"
with FileLock(lock_path):
# Support for older versions of datasets that have list of extractors instead
# of dict of extractors
extractors = (
Extractor.extractors
if isinstance(Extractor.extractors, list)
else Extractor.extractors.values()
)
if extractor:
return extractor.extract(path, dst)
for extractor in extractors:
if extractor.is_extractable(path):
return extractor.extract(path, dst)
raise ValueError("Extraction method not found for {}".format(path))
def download_google_drive(
url: str = None, id: str = None, dst: str = None, is_folder: bool = False
):
os.makedirs(dst, exist_ok=True)
if (url is None) == (id is None):
raise ValueError("Exactly one of url or id must be provided.")
if dst is None:
raise ValueError("dst must be provided.")
try:
import gdown
except ImportError:
raise ImportError(
"Google Drive download requires gdown. Install with `pip install gdown`."
)
if is_folder:
gdown.download_folder(url=url, id=id, output=dst)
else:
gdown.download(url=url, id=id, output=dst)
def download_df(
url: str, overwrite: bool = False, download_dir: str = None
) -> DataFrame:
"""Download a dataframe from a url.
Args:
url: The url.
overwrite: Whether to download the dataframe from the path again.
download_dir: The directory to download the dataframe to.
Returns:
DataFrame: The downloaded dataframe.
"""
if download_dir is None:
download_dir = os.path.abspath(os.path.expanduser("~/.meerkat/dataframes"))
# Handle huggingface repositories.
# Ignore tar.gz files, which we download separately.
# TODO: Consolidate so that we can
is_hf_repo = _IS_HF_AVAILABLE and isinstance(url, huggingface_hub.Repository)
if is_hf_repo:
return DataFrame.read(url)
if "huggingface.co" in url and "resolve" not in url:
return DataFrame.read(_download_huggingface_repo(url))
# A hacky way of getting the name from the url.
# This won't always work, because we could have name conflicts.
# TODO: Find a better way to do this.
local_path = url.split("/")[-1]
local_path = local_path.split(".zip")[0].split(".tar")[0]
dir_path = os.path.join(download_dir, local_path)
mode = "r:gz" if url.endswith(".tar.gz") else "r"
if overwrite and os.path.exists(dir_path):
shutil.rmtree(dir_path)
if not os.path.exists(dir_path):
cached_tar_path = download_url(
url=url,
dataset_dir=download_dir,
)
print("Extracting tar archive, this may take a few minutes...")
extract_tar_file(filepath=cached_tar_path, download_dir=download_dir, mode=mode)
os.remove(cached_tar_path)
return DataFrame.read(dir_path)
def extract_tar_file(filepath: str, download_dir: str, mode: str = None):
if mode is None:
mode = "r:gz" if filepath.endswith(".tar.gz") else "r"
tar = tarfile.open(filepath, mode=mode)
tar.extractall(download_dir)
tar.close()
@requires("huggingface_hub")
def _download_huggingface_repo(url: str) -> str:
"""Download a huggingface repository.
This function uses huggingface_hub.snapshot_download.
It does not download the repo with huggingface_hub.Repository.
Args:
url: The url of the huggingface repository.
Returns:
str: The downloaded path.
"""
parts = str(url).strip("/").split("/")
repo_type, user, repo_name = parts[-3:]
repo_type = repo_type.rstrip("s") # e.g. models -> model
path = huggingface_hub.snapshot_download(
repo_id=f"{user}/{repo_name}", repo_type=repo_type
)
return path
|
meerkat-main
|
meerkat/datasets/utils.py
|
from dataclasses import dataclass
@dataclass
class DatasetInfo:
name: str
full_name: str = None
description: str = None
citation: str = None
homepage: str = None
license: str = None
tags: str = None
|
meerkat-main
|
meerkat/datasets/info.py
|
from __future__ import annotations
import hashlib
import json
import os
from dataclasses import dataclass
import numpy as np
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
IMAGE_URL = "http://images.cocodataset.org/zips/{split}{version}.zip"
# flake8: noqa
LABEL_URL = "https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_{version}_{split}.json.zip"
@dataclass
class LVISDataset:
images: mk.DataFrame
annotations: mk.DataFrame
categories: mk.DataFrame
@property
def tags(self) -> mk.DataFrame:
tags = self.annotations["image_id", "category_id"].to_pandas().drop_duplicates()
tags = mk.DataFrame.from_pandas(tags).merge(
self.categories["id", "synset"], left_on="category_id", right_on="id"
)
tags["id"] = (
tags["image_id"].to_pandas().astype(str)
+ "_"
+ tags["category_id"].astype(str)
)
return tags
@datasets.register()
class lvis(DatasetBuilder):
VERSIONS = ["v1"]
info = DatasetInfo(
name="lvis",
full_name="Common Objects in Context",
description="A dataset for Large Vocabulary Instance Segmenation (LVIS).",
homepage="https://www.lvisdataset.org/",
tags=["image", "object recognition"],
# flake8: noqa
citation="@inproceedings{gupta2019lvis,title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation},author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},year={2019}}",
)
def __init__(
self,
dataset_dir: str = None,
version: str = None,
download_mode: str = "reuse",
include_segmentations: bool = False,
**kwargs,
):
super().__init__(
dataset_dir=dataset_dir,
version=version,
download_mode=download_mode,
)
self.include_segmentations = include_segmentations
def build(self):
out = self._read_build_cache()
if out is not None:
return out
image_dfs = []
annot_dfs = []
cat_dfs = []
for split in ["train", "val"]:
dct = json.load(
open(
os.path.join(self.dataset_dir, f"lvis_v1_{split}.json"),
"rb",
)
)
image_df = mk.DataFrame(dct["images"])
image_df["split"] = split
image_dfs.append(image_df)
annot_df = mk.DataFrame(dct["annotations"])
if not self.include_segmentations:
annot_df.remove_column("segmentation")
# Creating a numpy array from the raw data is much faster than
# iterating through the column.
# TODO: Consider adding the __array__ protocol for the abstract column.
annot_df["bbox"] = np.array(annot_df["bbox"].data)
annot_dfs.append(annot_df)
cat_dfs.append(mk.DataFrame(dct["categories"]))
image_df = mk.concat(image_dfs, axis=0)
# need to merge in the image column from 2014 COCO because the splits are not
# the same as in 2017. Eventually, we should just require that people download 2017.
coco_df = mk.get("coco", version="2014")
image_df = image_df.merge(coco_df["id", "image"], on="id")
image_df["image"].base_dir = self.var_dataset_dir
image_df.data.reorder(
["id", "image"] + [c for c in image_df.columns if c not in ["id", "image"]]
)
annot_df = mk.concat(annot_dfs, axis=0)
cat_df = cat_dfs[0]
cat_df["val_image_count"] = cat_dfs[1]["image_count"]
cat_df["val_instance_count"] = cat_dfs[1]["instance_count"]
cat_df["train_image_count"] = cat_dfs[0]["image_count"]
cat_df["train_instance_count"] = cat_dfs[0]["instance_count"]
cat_df.remove_column("image_count")
cat_df.remove_column("instance_count")
out = LVISDataset(
images=image_df,
annotations=annot_df,
categories=cat_df,
)
self._write_build_cache(out)
return out
def _build_cache_hash(self):
return hashlib.sha1(
json.dumps({"include_segmentations": self.include_segmentations}).encode(
"utf-8"
)
).hexdigest()[:10]
def _build_cache_path(self, key: str):
dir_path = os.path.join(self.dataset_dir, "build_cache")
os.makedirs(dir_path, exist_ok=True)
return os.path.join(dir_path, f"out_{key}_{self._build_cache_hash()}.mk")
def _read_build_cache(self) -> LVISDataset:
out = {}
for k in ["images", "annotations", "categories"]:
path = self._build_cache_path(k)
if not os.path.exists(path):
return None
out[k] = mk.DataFrame.read(path)
return LVISDataset(**out)
def _write_build_cache(self, out: LVISDataset):
for k in ["images", "annotations", "categories"]:
df = getattr(out, k)
df.write(self._build_cache_path(k))
def download(self):
os.makedirs(self.dataset_dir, exist_ok=True)
coco_dataset_dir = self._get_dataset_dir(name="coco", version="2014")
if not os.path.exists(coco_dataset_dir):
raise ValueError(
"LVIS depends on COCO 2014, please download it first with "
"`mk.get('coco', version='2014')`."
)
for split in ["train", "val", "test"]:
lvis_path = os.path.join(self.dataset_dir, f"{split}2014")
if not (os.path.exists(lvis_path)):
os.symlink(
os.path.join(self._get_dataset_dir("coco", "2014"), f"{split}2014"),
lvis_path,
)
if split != "test":
# no test labels
downloaded_path = download_url(
LABEL_URL.format(version=self.version, split=split),
self.dataset_dir,
)
extract(downloaded_path, self.dataset_dir)
|
meerkat-main
|
meerkat/datasets/lvis/__init__.py
|
import os
import shutil
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
@datasets.register()
class pascal(DatasetBuilder):
VERSIONS = ["2012"]
VERSION_TO_URL = {
# flake8: noqa
"2012": "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
}
info = DatasetInfo(
name="pascal",
full_name="PASCAL",
description="Image data sets for object class recognition.",
homepage="http://host.robots.ox.ac.uk/pascal/VOC/",
tags=["image", "object recognition"],
citation=(
"@Article{Everingham10,"
'author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn,'
'J. and Zisserman, A.",'
'title = "The Pascal Visual Object Classes (VOC) Challenge",'
'journal = "International Journal of Computer Vision",'
'volume = "88",'
'year = "2010",'
'number = "2",'
"month = jun,"
'pages = "303--338",}'
),
)
def build(self):
if self.version == "2012":
return build_pascal_2012_df(dataset_dir=self.dataset_dir)
else:
raise ValueError()
def download(self):
url = self.VERSION_TO_URL[self.version]
downloaded_path = download_url(url, self.dataset_dir)
extract(downloaded_path, os.path.join(self.dataset_dir, "tmp"))
shutil.move(
os.path.join(self.dataset_dir, "tmp/VOCdevkit"),
os.path.join(self.dataset_dir, "VOCdevkit"),
)
def build_pascal_2012_df(dataset_dir: str):
base_dir = os.path.join(dataset_dir, "VOCdevkit/VOC2012")
train_df = mk.DataFrame.from_csv(
os.path.join(base_dir, "ImageSets/Main/train.txt"), header=None, names=["id"]
)
train_df["split"] = ["train"] * len(train_df)
val_df = mk.DataFrame.from_csv(
os.path.join(base_dir, "ImageSets/Main/val.txt"), header=None, names=["id"]
)
val_df["split"] = ["val"] * len(val_df)
df = mk.concat([train_df, val_df], axis=0)
# create filename column
df["file_name"] = df["id"] + ".jpg"
df["image"] = mk.ImageColumn.from_filepaths(
df["file_name"], base_dir=os.path.join(base_dir, "JPEGImages")
)
for class_name in PASCAL_CLASSES:
label_df = mk.DataFrame.from_csv(
os.path.join(
base_dir, f"ImageSets/Main/{class_name}_trainval.txt".format(class_name)
),
header=None,
delimiter=" ?",
names=["id", class_name],
engine="python",
)
label_df[class_name] = (label_df[class_name] == 1).astype(int)
df = df.merge(label_df, on="id", validate="one_to_one")
return df
PASCAL_CLASSES = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedflant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
|
meerkat-main
|
meerkat/datasets/pascal/__init__.py
|
"""Datasets for Long Range Arena (LRA) Pathfinder task."""
import itertools
import os
import pandas as pd
import meerkat as mk
from meerkat.tools.lazy_loader import LazyLoader
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url
tf = LazyLoader("tensorflow")
_RESOLUTIONS = ["32px", "64px", "128px", "256px"]
DIFFICULTY = {
"curv_baseline": "easy",
"curv_contour_length_9": "medium",
"curv_contour_length_14": "hard",
}
_VERSIONS = _RESOLUTIONS + [
f"{res}/{difficulty}"
for res, difficulty in itertools.product(_RESOLUTIONS, DIFFICULTY.values())
]
@datasets.register()
class pathfinder(DatasetBuilder):
"""Long Range Arena (LRA) Pathfinder dataset."""
VERSIONS = _VERSIONS
info = DatasetInfo(
name="pathfinder",
full_name="Long Range Arena (LRA) Pathfinder",
description=(
"Image data set to determine whether two points represented "
"as circles are connected by a path consisting of dashes."
),
homepage="https://github.com/google-research/long-range-arena",
tags=["image", "classification"],
citation=None,
)
def build(self):
"""Get the processed dataframe hosted on huggingface."""
difficulty = self.version.split("/")[-1] if "/" in self.version else None
df = mk.DataFrame.read(
self._get_huggingface_path(), overwrite=self.download_mode == "force"
)
if difficulty is not None:
df = df[df["difficulty"] == difficulty]
df["image"] = mk.files(df["path"], base_dir=self.dataset_dir, type="image")
df.set_primary_key("path", inplace=True)
return df
def download(self):
if not os.path.exists(self.dataset_dir):
raise ValueError(
f"Dataset {self.name} is not downloaded to {self.dataset_dir}. "
"Please download the dataset from "
"https://github.com/google-research/long-range-arena. "
f"Extract the dataset to {self.dataset_dir}."
)
def is_downloaded(self):
return os.path.exists(self.dataset_dir)
def download_url(self, url: str):
"""Download files to a meerkat folder in the lra directory."""
output_dir = os.path.join(self.dataset_dir, "meerkat")
os.makedirs(output_dir, exist_ok=True)
return download_url(url, output_dir, force=self.download_mode == "force")
def _build_from_raw(self):
"""Build the dataset from the raw tensorflow files.
This requires having tensorflow installed.
"""
dfs = []
difficulty = self.version.split("/")[-1] if "/" in self.version else None
for subfolder in os.listdir(self.dataset_dir):
# If a difficulty is specified, only include the specified difficulty.
if difficulty is not None and DIFFICULTY[subfolder] != difficulty:
continue
dirpath = os.path.join(self.dataset_dir, subfolder, "")
df = pd.DataFrame(_extract_metadata(dirpath, base_path=subfolder))
df["subfolder"] = subfolder
df["difficulty"] = DIFFICULTY[subfolder]
dfs.append(df)
# Concatenate the dataframes.
df = pd.concat(dfs, axis=0)
df = df.reset_index(drop=True)
df = mk.DataFrame.from_pandas(df)
df = df.drop("index")
df["image"] = mk.files(df["path"], base_dir=self.dataset_dir, type="image")
df.set_primary_key("path", inplace=True)
return df
@staticmethod
def _get_dataset_dir(name: str, version: str) -> str:
"""self.dataset_dir will be:
<root_dir>/lra_release/lra_release/pathfinder<res> e.g.
/home/user/.meerkat/datasets/lra_release/lra_release/pathfinder32.
"""
res = _get_resolution(version)
return os.path.join(
mk.config.datasets.root_dir,
"lra_release",
"lra_release",
f"pathfinder{res}",
)
def _get_huggingface_path(self):
"""Get path to the meerkat DataFrame uploaded to huggingface."""
res = _get_resolution(self.version)
return f"https://huggingface.co/datasets/meerkat-ml/pathfinder/resolve/main/pathfinder{res}.mk.tar.gz" # noqa: E501
def _extract_metadata(dirpath, base_path: str):
"""Extract the filepath and label from the metadata file.
Example metadata:
['imgs/43', 'sample_0.png', '0', '0', '1.8', '6', '2.0', '5', '1.5', '2', '1']
    Args:
        dirpath: Path to the resolution/difficulty subfolder that contains a
            ``metadata`` directory.
        base_path: Subfolder name prepended to each extracted image path.
    """
metadata_dir = os.path.join(dirpath, "metadata")
image_paths = []
labels = []
for metadata_file in os.listdir(metadata_dir):
file_path = os.path.join(metadata_dir, metadata_file)
meta_examples = (
tf.io.read_file(file_path).numpy().decode("utf-8").split("\n")[:-1]
)
for m_example in meta_examples:
m_example = m_example.split(" ")
image_paths.append(os.path.join(base_path, m_example[0], m_example[1]))
labels.append(int(m_example[3]))
return {"path": image_paths, "label": labels}
def _get_resolution(version: str):
"""Get the resolution from the version string."""
return version.split("/")[0].split("px")[0]
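

# A minimal, standalone sketch of how a version string such as "64px/medium"
# splits into the resolution used for the dataset directory and the difficulty
# used to filter rows, mirroring `_get_resolution` and `build` above. The
# helper name `_parse_version_example` is illustrative only; no meerkat or
# tensorflow is needed to run it.
def _parse_version_example(version: str):
    resolution = version.split("/")[0].split("px")[0]  # e.g. "64"
    difficulty = version.split("/")[-1] if "/" in version else None  # e.g. "medium"
    return resolution, difficulty


assert _parse_version_example("64px/medium") == ("64", "medium")
assert _parse_version_example("32px") == ("32", None)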
|
meerkat-main
|
meerkat/datasets/lra/pathfinder.py
|
from .pathfinder import pathfinder
__all__ = ["pathfinder"]
|
meerkat-main
|
meerkat/datasets/lra/__init__.py
|
import os
import subprocess
import PIL
import meerkat as mk
from meerkat.columns.deferred.image import load_image
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
def _write_empty_image(dst):
img = PIL.Image.new("RGB", (32, 32), color="black")
img.save(dst, format="JPEG")
@datasets.register()
class mimic_iii(DatasetBuilder):
VERSIONS = ["main"]
info = DatasetInfo(
name="mimic-iii",
full_name="MIMIC-III",
# flake8: noqa
description="MIMIC-III (Medical Information Mart for Intensive Care III) is a large, freely-available database comprising deidentified health-related data associated with over forty thousand patients who stayed in critical care units of the Beth Israel Deaconess Medical Center between 2001 and 2012.",
homepage="https://mimic.mit.edu/docs/iii/",
tags=["medicine"],
citation=None,
)
def build(self):
pass
def download(self):
# clone the repo using subprocess
# "wget -r -N -c -np --user seyuboglu --ask-password https://physionet.org/files/mimiciii/1.4/"
subprocess.call(
[
"wget",
"-r",
"-N",
"-c",
"-np",
"-P",
self.dataset_dir,
"--ask-user",
"seyuboglu",
"--ask-password",
"https://physionet.org/files/mimiciii/1.4/",
]
)
|
meerkat-main
|
meerkat/datasets/mimic_iii/__init__.py
|
import os
import subprocess
import PIL
import meerkat as mk
from meerkat.columns.deferred.image import load_image
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
REPO = "https://github.com/NationalGalleryOfArt/opendata.git"
def _write_empty_image(dst):
img = PIL.Image.new("RGB", (32, 32), color="black")
img.save(dst, format="JPEG")
@datasets.register()
class ngoa(DatasetBuilder):
VERSIONS = ["main"]
info = DatasetInfo(
name="ngoa",
full_name="National Gallery of Art Open Data",
# flake8: noqa
description="The dataset provides data records relating to the 130,000+ artworks in our collection and the artists who created them. You can download the dataset free of charge without seeking authorization from the National Gallery of Art.",
homepage="https://github.com/NationalGalleryOfArt/opendata",
tags=["art"],
citation=None,
)
def build(self):
base_dir = os.path.join(self.dataset_dir, "data")
db = {}
db["objects"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "objects.csv"), low_memory=False
)
db["published_images"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "published_images.csv"),
)
db["published_images"]["image"] = mk.ImageColumn.from_filepaths(
db["published_images"]["iiifthumburl"],
loader=mk.FileLoader(
downloader="url",
loader=load_image,
# replace images for which the download fails with a black image
fallback_downloader=_write_empty_image,
cache_dir=os.path.join(base_dir, "iiifthumburl"),
),
)
db["published_images"]["image_224"] = mk.ImageColumn.from_filepaths(
db["published_images"]["iiifurl"].apply(
lambda x: f"{x}/full/!224,224/0/default.jpg"
),
loader=mk.FileLoader(
downloader="url",
loader=load_image,
cache_dir=os.path.join(base_dir, "iiifimage"),
# replace images for which the download fails with a black image
fallback_downloader=_write_empty_image,
),
)
db["objects_constituents"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "objects_constituents.csv"),
)
db["constituents"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "constituents.csv"),
)
db["constituents_text_entries"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "constituents_text_entries.csv"),
)
db["locations"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "locations.csv"),
)
db["objects_text_entries"] = mk.DataFrame.from_csv(
os.path.join(base_dir, "objects_text_entries.csv"),
)
return db
def download(self):
# clone the repo using subprocess
subprocess.call(["git", "clone", REPO, self.dataset_dir])
|
meerkat-main
|
meerkat/datasets/ngoa/__init__.py
|
# Swedish Traffic Signs Dataset (STSD)
import os
import pandas as pd
from tqdm.auto import tqdm
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
_SETS_TO_URLS = {
"Set1/annotations.txt": "http://www.isy.liu.se/cvl/research/trafficSigns/swedishSignsSummer/Set1/annotations.txt", # noqa: E501
"Set2/annotations.txt": "http://www.isy.liu.se/cvl/research/trafficSigns/swedishSignsSummer/Set2/annotations.txt", # noqa: E501
"Set1/Set1Part0": "http://www.isy.liu.se/cvl/research/trafficSigns/swedishSignsSummer/Set1/Set1Part0.zip", # noqa: E501
"Set2/Set2Part0": "http://www.isy.liu.se/cvl/research/trafficSigns/swedishSignsSummer/Set2/Set2Part0.zip", # noqa: E501
}
@datasets.register()
class stsd(DatasetBuilder):
"""Swedish Traffic Sign Dataset STSD."""
VERSIONS = ["2019"]
info = DatasetInfo(
name="stsd",
full_name="Swedish Traffic Sign Dataset STSD",
description=("Image data set to detect street signs."),
homepage="https://www.cvl.isy.liu.se/en/research/datasets/traffic-signs-dataset/download/", # noqa: E501
tags=["image", "object recognition"],
citation=None,
)
def build(self):
"""Get the processed dataframe hosted on huggingface."""
annotations = []
for set_name in ["Set1", "Set2"]:
ann_file = os.path.join(self.dataset_dir, f"{set_name}/annotations.txt")
df = _format_annotations(ann_file)
df["path"] = df["filename"].apply(
lambda x: os.path.join(
self.dataset_dir, set_name, f"{set_name}Part0", x
)
)
annotations.append(df)
annotations = pd.concat(annotations).reset_index(drop=True)
df = pd.DataFrame(annotations)
df = mk.DataFrame.from_pandas(df).drop("index")
df["image"] = mk.files(
df["path"],
type="image",
)
df["image_crop"] = mk.defer(df, crop)
return df
def download(self):
        for relative_path, url in tqdm(_SETS_TO_URLS.items()):
downloaded_path = download_url(url, self.dataset_dir)
path = os.path.join(self.dataset_dir, relative_path)
if url.endswith(".zip"):
os.makedirs(path, exist_ok=True)
extract(downloaded_path, path)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
os.rename(downloaded_path, path)
def is_downloaded(self):
return os.path.exists(self.dataset_dir) and all(
os.path.exists(os.path.join(self.dataset_dir, x)) for x in _SETS_TO_URLS
)
def crop(image, x1, y1, x2, y2):
# Don't crop the image if the crop coordinates aren't valid.
if any(v == -1 for v in [x1, y1, x2, y2]):
return image.copy()
out = image.crop((x1, y1, x2, y2))
return out
def _format_annotations(ann_file):
annotations = []
with open(ann_file, "r") as f:
lines = f.readlines()
for line in lines:
filename, anns = (x.strip() for x in line.split(":"))
for ann in anns.split(";"):
ann = ann.strip()
if len(ann) == 0:
continue
if ann == "MISC_SIGNS":
annotations.append(
{
"filename": filename,
"visibility": "N/A",
"x1": -1,
"y1": -1,
"x2": -1,
"y2": -1,
"sign_type": "MISC_SIGNS",
"category": "MISC_SIGNS",
}
)
continue
visibility, x2, y2, x1, y1, sign_type, name = (
x.strip() for x in ann.split(",")
)
# Annotation file is malformed for this example.
x2, y2, x1, y1 = [
x.split("l")[0] if "l" in x else x for x in [x2, y2, x1, y1]
]
annotations.append(
{
"filename": filename,
"visibility": visibility,
"x1": float(x1),
"y1": float(y1),
"x2": float(x2),
"y2": float(y2),
"sign_type": sign_type,
"category": name,
}
)
return pd.DataFrame(annotations)
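

# Standalone sketch of the annotation format parsed by `_format_annotations`
# above: each line is "<filename> : <ann>;<ann>;..." where a regular annotation
# is "visibility, x2, y2, x1, y1, sign_type, name". The sample line below is
# made up for illustration; no files are read.
_sample_line = (
    "image1.jpg : VISIBLE, 120.5, 80.0, 100.0, 60.0, 30_SIGN, SPEED_LIMIT_30; MISC_SIGNS;"
)
_filename, _anns = (x.strip() for x in _sample_line.split(":"))
for _ann in (a.strip() for a in _anns.split(";")):
    if not _ann:
        continue
    if _ann == "MISC_SIGNS":
        print(_filename, "misc sign (no bounding box)")
        continue
    _vis, _x2, _y2, _x1, _y1, _sign_type, _name = (x.strip() for x in _ann.split(","))
    print(_filename, _name, (float(_x1), float(_y1), float(_x2), float(_y2)))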
|
meerkat-main
|
meerkat/datasets/stsd/__init__.py
|
import os
from typing import Dict, Mapping
import ujson as json
import meerkat as mk
# TODO (Sabri): Add support for downloading the data.
# For the time being, relevant files should be downloaded to the directory here
# https://visualgenome.org/VGViz/explore
def build_visual_genome_dfs(
dataset_dir: str, write: bool = False
) -> Dict[str, mk.DataFrame]:
dfs = {}
print("Loading objects and attributes...")
dfs.update(_build_object_dfs(dataset_dir))
print("Loading images...")
dfs.update(_build_image_df(dataset_dir=dataset_dir))
print("Loading relationships...")
dfs.update(_build_relationships_df(dataset_dir=dataset_dir))
if write:
write_visual_genome_dfs(dfs, dataset_dir=dataset_dir)
return dfs
def read_visual_genome_dfs(dataset_dir: str) -> Dict[str, mk.DataFrame]:
return {
key: mk.DataFrame.read(os.path.join(dataset_dir, f"{key}.mk"))
for key in ["attributes", "relationships", "objects", "images"]
}
def write_visual_genome_dfs(dfs: Mapping[str, mk.DataFrame], dataset_dir: str):
for key, df in dfs.items():
df.write(os.path.join(dataset_dir, f"{key}.mk"))
def _build_object_dfs(dataset_dir: str):
with open(os.path.join(dataset_dir, "attributes.json")) as f:
objects = json.load(f)
objects_df = [] # create one table for objects
attributes_df = [] # create one table of attributes
for image in objects:
for obj in image["attributes"]:
obj["image_id"] = image["image_id"]
# all names are length 1
names = obj.pop("names")
obj["name"] = names[0]
# add attributes to the table
attributes = obj.pop("attributes", None)
if attributes is not None:
for attribute in attributes:
attributes_df.append(
{"object_id": obj["object_id"], "attribute": attribute}
)
# the vast majority of objects (99.7%) have 0 or 1 synonym in their
# synset, so we only consider the first synonym to keep things simple
synset = obj.pop("synsets")
obj["syn_name"] = synset[0] if len(synset) > 0 else ""
objects_df.append(obj)
return {
"objects": mk.DataFrame(objects_df),
"attributes": mk.DataFrame(attributes_df),
}
def _build_image_df(dataset_dir: str):
with open(os.path.join(dataset_dir, "image_data.json")) as f:
images = json.load(f)
image_df = mk.DataFrame(images)
image_df.remove_column("coco_id")
image_df.remove_column("flickr_id")
image_df["local_path"] = dataset_dir + (image_df["url"].str.split("rak248")).apply(
lambda x: x[-1]
)
image_df["image"] = mk.ImageColumn(image_df["local_path"])
return {"images": image_df}
def _build_relationships_df(dataset_dir: str):
with open(os.path.join(dataset_dir, "relationships.json")) as f:
relationships = json.load(f)
rel_df = []
for image in relationships:
image_id = image["image_id"]
for r in image["relationships"]:
object_synset = r["object"]["synsets"]
subject_synset = r["subject"]["synsets"]
rel_df.append(
{
"image_id": image_id,
"predicate": r["predicate"],
"subject_object_id": r["subject"]["object_id"],
"subject_name": r["subject"]["name"]
if "name" in r["subject"]
else r["subject"]["names"][0],
"subject_syn": subject_synset[0] if len(subject_synset) > 0 else "",
"object_object_id": r["object"]["object_id"],
"object_name": r["object"]["name"]
if "name" in r["object"]
else r["object"]["names"][0],
"object_syn": object_synset[0] if len(object_synset) > 0 else "",
}
)
rel_df = mk.DataFrame(rel_df)
return {"relationships": rel_df}
|
meerkat-main
|
meerkat/datasets/visual_genome/__init__.py
|
import os
import pandas as pd
from wilds import get_dataset
from wilds.datasets.wilds_dataset import WILDSDataset
import meerkat as mk
# flake8: noqa
URL = "http://worksheets.codalab.org/rest/bundles/0x505056d5cdea4e4eaa0e242cbfe2daa4/contents/blob/"
def build_waterbirds_df(
dataset_dir: str,
download: bool = True,
):
"""Download and load the Waterbirds dataset.
    Args:
        dataset_dir (str): The directory to download the dataset to.
        download (bool): Whether to download the dataset if it is not found.
    Returns:
        a DataFrame containing columns `image`, `y`, `background`, and `split`.
    """
dataset = get_dataset(dataset="waterbirds", root_dir=dataset_dir, download=download)
df = pd.DataFrame(dataset.metadata_array, columns=dataset.metadata_fields)
df["filepath"] = dataset._input_array
df = mk.DataFrame.from_pandas(df)
df["image"] = mk.ImageColumn(
df["filepath"], base_dir=os.path.join(dataset_dir, "waterbirds_v1.0")
)
df["split"] = pd.Series(dataset._split_array).map(
{
v: k if k != "val" else "valid"
for k, v in WILDSDataset.DEFAULT_SPLITS.items()
}
)
backgrounds = dataset._metadata_map["background"]
birds = dataset._metadata_map["y"]
group_mapping = {
f"{bird_idx}{bground_idx}": f"{birds[bird_idx]}-{backgrounds[bground_idx]}"
for bird_idx in [0, 1]
for bground_idx in [0, 1]
}
df["group"] = (df["y"].astype(str) + df["background"].data.astype(str)).map(
group_mapping
)
return df
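

# Standalone sketch of the group construction used in `build_waterbirds_df`
# above: the bird label index and background index are concatenated as strings
# and mapped to a readable group name. The label names below mirror the
# dataset's metadata map but are written out by hand for illustration.
_birds = {0: "landbird", 1: "waterbird"}
_backgrounds = {0: "land", 1: "water"}
_group_mapping = {
    f"{bird_idx}{bground_idx}": f"{_birds[bird_idx]}-{_backgrounds[bground_idx]}"
    for bird_idx in [0, 1]
    for bground_idx in [0, 1]
}
print(_group_mapping["10"])  # waterbird-land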
|
meerkat-main
|
meerkat/datasets/waterbirds/__init__.py
|
import random
from PIL import Image
import meerkat as mk
def get_torchvision_dataset(dataset_name, download_dir, is_train):
    raise NotImplementedError
# dataset = torchvision.datasets.__dict__[dataset_name.upper()](
# root=download_dir,
# train=is_train,
# download=True,
# )
def get_cifar10(download_dir: str, frac_val: float = 0.0, download: bool = True):
"""Load CIFAR10 as a Meerkat DataFrame.
Args:
download_dir: download directory
frac_val: fraction of training set to use for validation
Returns:
a DataFrame containing columns `raw_image`, `image` and `label`
"""
from torchvision.datasets.cifar import CIFAR10
dfs = []
for split in ["train", "test"]:
dataset = CIFAR10(
root=download_dir,
train=split == "train",
download=download,
)
df = mk.DataFrame(
{
"raw_image": dataset.data,
"label": mk.TorchTensorColumn(dataset.targets),
}
)
if split == "train" and frac_val > 1e-4:
# sample indices for splitting off val
val_indices = set(
random.sample(
range(len(df)),
int(frac_val * len(df)),
)
)
df["split"] = [
"train" if i not in val_indices else "val" for i in range(len(df))
]
else:
df["split"] = [split] * len(dataset)
dfs.append(df)
df = mk.concat(dfs)
df["image"] = mk.DeferredColumn(df["raw_image"], Image.fromarray)
return df
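

# Standalone sketch of the train/val split logic in `get_cifar10` above: a
# fraction of the training indices is sampled without replacement and
# relabelled "val". The sizes here are tiny and purely illustrative.
import random as _random

_n, _frac_val = 10, 0.2
_val_indices = set(_random.sample(range(_n), int(_frac_val * _n)))
_split = ["train" if i not in _val_indices else "val" for i in range(_n)]
print(_split)  # e.g. ['train', 'val', 'train', ..., 'train']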
|
meerkat-main
|
meerkat/datasets/torchvision/__init__.py
|
import os
import tarfile
import pandas as pd
import meerkat as mk
from ..abstract import DatasetBuilder
from ..info import DatasetInfo
from ..registry import datasets
from ..utils import download_url, extract
ID_TO_WORDS = {
"n02979186": "cassette player",
"n03417042": "garbage truck",
"n01440764": "tench",
"n02102040": "english springer spaniel",
"n03028079": "church",
"n03888257": "parachute",
"n03394916": "french horn",
"n03000684": "chainsaw",
"n03445777": "golf ball",
"n03425413": "gas pump",
}
ID_TO_IDX = {
"n02979186": 482,
"n03417042": 569,
"n01440764": 0,
"n02102040": 217,
"n03028079": 497,
"n03888257": 701,
"n03394916": 566,
"n03000684": 491,
"n03445777": 574,
"n03425413": 571,
}
@datasets.register()
class imagenette(DatasetBuilder):
VERSION_TO_URL = {
"full": "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz",
"320px": "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz",
"160px": "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-160.tgz",
}
VERSIONS = ["full", "320px", "160px"]
info = DatasetInfo(
name="imagenette",
full_name="ImageNette",
description=(
"Imagenette is a subset of 10 easily classified classes from Imagenet "
"(tench, English springer, cassette player, chain saw, church, "
"French horn, garbage truck, gas pump, golf ball, parachute)."
),
homepage="https://github.com/fastai/imagenette",
tags=["image", "classification"],
)
@property
def data_dir(self):
return os.path.join(
self.dataset_dir,
self.VERSION_TO_URL[self.version].split("/")[-1].split(".")[0],
)
def build(self):
df = self._build_df()
df = mk.DataFrame.from_pandas(df)
df["img"] = mk.files(df["img_path"], base_dir=self.data_dir, type="image")
df.set_primary_key("img_id", inplace=True)
return df
def download(self):
url = self.VERSION_TO_URL[self.version]
path = self.download_url(url)
extract(path, self.dataset_dir)
def _build_df(
self,
):
csv_path = os.path.join(self.data_dir, "noisy_imagenette.csv")
df = pd.read_csv(csv_path)
df["label_id"] = df["noisy_labels_0"]
df["label"] = df["label_id"].replace(ID_TO_WORDS)
df["label_idx"] = df["label_id"].replace(ID_TO_IDX)
df["split"] = df["is_valid"].replace({False: "train", True: "valid"})
df["img_path"] = df.path
df["img_id"] = df["path"].apply(lambda x: x.split("/")[-1].split(".")[0])
return df
def download_imagenette(
download_dir,
version="160px",
overwrite: bool = False,
return_df: bool = False,
):
"""Download Imagenette dataset.
Args:
download_dir (str): The directory path to save to.
version (str, optional): Imagenette version.
Choices: ``"full"``, ``"320px"``, ``"160px"``.
overwrite (bool, optional): If ``True``, redownload the dataset.
        return_df (bool, optional): If ``True``, also return the metadata
            ``pd.DataFrame``.
    Returns:
        Union[str, Tuple[pd.DataFrame, str]]: If ``return_df=True``, a tuple of the
            metadata DataFrame and the directory path where the data is stored.
            Otherwise, the directory path only.
References:
https://github.com/fastai/imagenette
"""
tar_path = os.path.join(
download_dir, os.path.basename(imagenette.VERSION_TO_URL[version])
)
dir_path = os.path.splitext(tar_path)[0]
csv_path = os.path.join(dir_path, "imagenette.csv")
if not overwrite and os.path.isfile(csv_path):
return (pd.read_csv(csv_path), dir_path) if return_df else dir_path
if overwrite or not os.path.exists(dir_path):
cached_tar_path = download_url(
url=imagenette.VERSION_TO_URL[version],
dataset_dir=download_dir,
)
print("Extracting tar archive, this may take a few minutes...")
tar = tarfile.open(cached_tar_path)
tar.extractall(download_dir)
tar.close()
# os.remove(tar_path)
else:
print(f"Directory {dir_path} already exists. Skipping download.")
# build dataframe
df = pd.read_csv(os.path.join(dir_path, "noisy_imagenette.csv"))
df["label_id"] = df["noisy_labels_0"]
df["label"] = df["label_id"].replace(ID_TO_WORDS)
df["label_idx"] = df["label_id"].replace(ID_TO_IDX)
df["split"] = df["is_valid"].replace({False: "train", True: "valid"})
df["img_path"] = df.path
df[["label", "split", "img_path"]].to_csv(csv_path, index=False)
def build_imagenette_df(
dataset_dir: str,
download: bool = False,
version: str = "160px",
) -> mk.DataFrame:
"""Build DataFrame for the Imagenette dataset.
    Args:
        dataset_dir (str): The directory path to save to or load from.
        download (bool, optional): If ``True``, download the dataset if it is not
            already available at ``dataset_dir``.
        version (str, optional): Imagenette version.
            Choices: ``"full"``, ``"320px"``, ``"160px"``.
Returns:
mk.DataFrame: A DataFrame corresponding to the dataset.
References:
https://github.com/fastai/imagenette
"""
if download:
df, dir_path = download_imagenette(
dataset_dir, version=version, overwrite=False, return_df=True
)
    else:
        csv_path = os.path.join(dataset_dir, "imagenette.csv")
        if not os.path.isfile(csv_path):
            raise ValueError("Imagenette is not downloaded. Pass `download=True`.")
        df = pd.read_csv(csv_path)
        # `dir_path` is needed below as the base directory for the image paths.
        dir_path = dataset_dir
df = mk.DataFrame.from_pandas(df)
df["img"] = mk.ImageColumn.from_filepaths(df["img_path"], base_dir=dir_path)
return df
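

# Standalone sketch of how `_build_df` above derives `img_id` from the relative
# image path stored in noisy_imagenette.csv; the path is illustrative.
_path = "train/n02979186/n02979186_9036.JPEG"
_img_id = _path.split("/")[-1].split(".")[0]
print(_img_id)  # n02979186_9036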
|
meerkat-main
|
meerkat/datasets/imagenette/__init__.py
|
from meerkat.tools.lazy_loader import LazyLoader
torch = LazyLoader("torch")
transforms = LazyLoader("torchvision.transforms")
transformers = LazyLoader("transformers")
def initialize_transform(transform_name, config, dataset):
if transform_name is None:
return None
elif transform_name == "bert":
return initialize_bert_transform(config)
elif transform_name == "image_base":
return initialize_image_base_transform(config, dataset)
elif transform_name == "image_resize_and_center_crop":
return initialize_image_resize_and_center_crop_transform(config, dataset)
elif transform_name == "poverty_train":
return initialize_poverty_train_transform()
else:
raise ValueError(f"{transform_name} not recognized")
def initialize_bert_transform(config):
assert "bert" in config.model
assert config.max_token_length is not None
tokenizer = getBertTokenizer(config.model)
def transform(text):
tokens = tokenizer(
text,
padding="max_length",
truncation=True,
max_length=config.max_token_length,
return_tensors="pt",
)
if config.model == "bert-base-uncased":
x = torch.stack(
(
tokens["input_ids"],
tokens["attention_mask"],
tokens["token_type_ids"],
),
dim=2,
)
elif config.model == "distilbert-base-uncased":
x = torch.stack((tokens["input_ids"], tokens["attention_mask"]), dim=2)
x = torch.squeeze(x, dim=0) # First shape dim is always 1
return x
return transform
def getBertTokenizer(model):
if model == "bert-base-uncased":
tokenizer = transformers.BertTokenizerFast.from_pretrained(model)
elif model == "distilbert-base-uncased":
tokenizer = transformers.DistilBertTokenizerFast.from_pretrained(model)
else:
raise ValueError(f"Model: {model} not recognized.")
return tokenizer
def initialize_image_base_transform(config, dataset):
transform_steps = []
if dataset.original_resolution is not None and min(
dataset.original_resolution
) != max(dataset.original_resolution):
crop_size = min(dataset.original_resolution)
transform_steps.append(transforms.CenterCrop(crop_size))
if config.target_resolution is not None and config.dataset != "fmow":
transform_steps.append(transforms.Resize(config.target_resolution))
transform_steps += [
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
transform = transforms.Compose(transform_steps)
return transform
def initialize_image_resize_and_center_crop_transform(config, dataset):
"""Resizes the image to a slightly larger square then crops the center."""
assert dataset.original_resolution is not None
assert config.resize_scale is not None
scaled_resolution = tuple(
int(res * config.resize_scale) for res in dataset.original_resolution
)
if config.target_resolution is not None:
target_resolution = config.target_resolution
else:
target_resolution = dataset.original_resolution
transform = transforms.Compose(
[
transforms.Resize(scaled_resolution),
transforms.CenterCrop(target_resolution),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
return transform
def initialize_poverty_train_transform():
transforms_ls = [
transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ColorJitter(brightness=0.8, contrast=0.8, saturation=0.8, hue=0.1),
transforms.ToTensor(),
]
rgb_transform = transforms.Compose(transforms_ls)
def transform_rgb(img):
# bgr to rgb and back to bgr
img[:3] = rgb_transform(img[:3][[2, 1, 0]])[[2, 1, 0]]
return img
transform = transforms.Lambda(lambda x: transform_rgb(x))
return transform
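

# Hedged usage sketch (assumes torch and transformers are installed and the
# distilbert-base-uncased tokenizer can be fetched): the "bert" transform built
# above turns a string into a (max_token_length, 2) tensor of input ids stacked
# with the attention mask. The config fields below mimic the attributes the
# functions above read; they are not a full WILDS config.
if __name__ == "__main__":
    from argparse import Namespace

    _config = Namespace(model="distilbert-base-uncased", max_token_length=32)
    _transform = initialize_bert_transform(_config)
    _x = _transform("a red truck parked by the road")
    print(_x.shape)  # torch.Size([32, 2])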
|
meerkat-main
|
meerkat/datasets/wilds/transforms.py
|
"""WILDS configuration defaults and operations.
All default configurations are integrated from the WILDS repository:
https://github.com/p-lambda/wilds/blob/ca7e4fd345aa4f998b8322191b083444d85e2a08/examples/configs/datasets.py
"""
base_config = {
"split_scheme": None,
"dataset_kwargs": {},
"download": False,
"frac": 1.0,
"version": None,
"loader_kwargs": {},
"train_loader": None,
"uniform_over_groups": None,
"distinct_groups": None,
"n_groups_per_batch": None,
"batch_size": None,
"eval_loader": "standard",
"model": None,
"model_kwargs": {},
"train_transform": None,
"eval_transform": None,
"target_resolution": None,
"resize_scale": None,
"max_token_length": None,
"loss_function": None,
"groupby_fields": None,
"group_dro_step_size": None,
"coral_penalty_weight": None,
"irm_lambda": None,
"irm_penalty_anneal_iters": None,
"algo_log_metric": None,
"val_metric": None,
"val_metric_decreasing": None,
"n_epochs": None,
"optimizer": None,
"lr": None,
"weight_decay": None,
"max_grad_norm": None,
"optimizer_kwargs": {},
"scheduler": None,
"scheduler_kwargs": {},
"scheduler_metric_split": "val",
"scheduler_metric_name": None,
"process_outputs_function": None,
"evaluate_all_splits": True,
"eval_splits": [],
"eval_only": False,
"eval_epoch": None,
"device": 0,
"seed": 0,
"log_dir": "./logs",
"log_every": 50,
"save_step": None,
"save_best": True,
"save_last": True,
"no_group_logging": None,
"use_wandb": False,
"progress_bar": False,
"resume": False,
}
dataset_defaults = {
"amazon": {
"split_scheme": "official",
"model": "distilbert-base-uncased",
"train_transform": "bert",
"eval_transform": "bert",
"max_token_length": 512,
"loss_function": "cross_entropy",
"algo_log_metric": "accuracy",
"batch_size": 8,
"lr": 1e-5,
"weight_decay": 0.01,
"n_epochs": 3,
"n_groups_per_batch": 2,
"irm_lambda": 1.0,
"coral_penalty_weight": 1.0,
"loader_kwargs": {
"num_workers": 1,
"pin_memory": True,
},
"process_outputs_function": "multiclass_logits_to_pred",
},
"bdd100k": {
"split_scheme": "official",
"model": "resnet50",
"model_kwargs": {"pretrained": True},
"loss_function": "multitask_bce",
"val_metric": "acc_all",
"val_metric_decreasing": False,
"optimizer": "SGD",
"optimizer_kwargs": {"momentum": 0.9},
"batch_size": 32,
"lr": 0.001,
"weight_decay": 0.0001,
"n_epochs": 10,
"algo_log_metric": "multitask_binary_accuracy",
"train_transform": "image_base",
"eval_transform": "image_base",
"process_outputs_function": "binary_logits_to_pred",
},
"camelyon17": {
"split_scheme": "official",
"model": "densenet121",
"model_kwargs": {"pretrained": False},
"train_transform": "image_base",
"eval_transform": "image_base",
"target_resolution": (96, 96),
"loss_function": "cross_entropy",
"groupby_fields": ["hospital"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
"optimizer": "SGD",
"optimizer_kwargs": {"momentum": 0.9},
"scheduler": None,
"batch_size": 32,
"lr": 0.001,
"weight_decay": 0.01,
"n_epochs": 5,
"n_groups_per_batch": 2,
"irm_lambda": 1.0,
"coral_penalty_weight": 0.1,
"algo_log_metric": "accuracy",
"process_outputs_function": "multiclass_logits_to_pred",
},
"celebA": {
"split_scheme": "official",
"model": "resnet50",
"model_kwargs": {"pretrained": True},
"train_transform": "image_base",
"eval_transform": "image_base",
"loss_function": "cross_entropy",
"groupby_fields": ["male", "y"],
"val_metric": "acc_wg",
"val_metric_decreasing": False,
"optimizer": "SGD",
"optimizer_kwargs": {"momentum": 0.9},
"scheduler": None,
"batch_size": 64,
"lr": 0.001,
"weight_decay": 0.0,
"n_epochs": 200,
"algo_log_metric": "accuracy",
"process_outputs_function": "multiclass_logits_to_pred",
},
"civilcomments": {
"split_scheme": "official",
"model": "distilbert-base-uncased",
"train_transform": "bert",
"eval_transform": "bert",
"loss_function": "cross_entropy",
"groupby_fields": ["black", "y"],
"val_metric": "acc_wg",
"val_metric_decreasing": False,
"batch_size": 16,
"lr": 1e-5,
"weight_decay": 0.01,
"n_epochs": 5,
"algo_log_metric": "accuracy",
"max_token_length": 300,
"irm_lambda": 1.0,
"coral_penalty_weight": 10.0,
"loader_kwargs": {
"num_workers": 1,
"pin_memory": True,
},
"process_outputs_function": "multiclass_logits_to_pred",
},
"fmow": {
"split_scheme": "official",
"dataset_kwargs": {
"oracle_training_set": False,
"seed": 111,
"use_ood_val": True,
},
"model": "densenet121",
"model_kwargs": {"pretrained": True},
"train_transform": "image_base",
"eval_transform": "image_base",
"loss_function": "cross_entropy",
"groupby_fields": [
"year",
],
"val_metric": "acc_worst_region",
"val_metric_decreasing": False,
"optimizer": "Adam",
"scheduler": "StepLR",
"scheduler_kwargs": {"gamma": 0.96},
"batch_size": 64,
"lr": 0.0001,
"weight_decay": 0.0,
"n_epochs": 50,
"n_groups_per_batch": 8,
"irm_lambda": 1.0,
"coral_penalty_weight": 0.1,
"algo_log_metric": "accuracy",
"process_outputs_function": "multiclass_logits_to_pred",
},
"iwildcam": {
"loss_function": "cross_entropy",
"val_metric": "F1-macro_all",
"model_kwargs": {"pretrained": True},
"train_transform": "image_base",
"eval_transform": "image_base",
"target_resolution": (448, 448),
"val_metric_decreasing": False,
"algo_log_metric": "accuracy",
"model": "resnet50",
"lr": 3e-5,
"weight_decay": 0.0,
"batch_size": 16,
"n_epochs": 12,
"optimizer": "Adam",
"split_scheme": "official",
"scheduler": None,
"groupby_fields": [
"location",
],
"n_groups_per_batch": 2,
"irm_lambda": 1.0,
"coral_penalty_weight": 10.0,
"no_group_logging": True,
"process_outputs_function": "multiclass_logits_to_pred",
},
"ogb-molpcba": {
"split_scheme": "official",
"model": "gin-virtual",
"model_kwargs": {"dropout": 0.5}, # include pretrained
"loss_function": "multitask_bce",
"groupby_fields": [
"scaffold",
],
"val_metric": "ap",
"val_metric_decreasing": False,
"optimizer": "Adam",
"batch_size": 32,
"lr": 1e-03,
"weight_decay": 0.0,
"n_epochs": 100,
"n_groups_per_batch": 4,
"irm_lambda": 1.0,
"coral_penalty_weight": 0.1,
"no_group_logging": True,
"process_outputs_function": None,
"algo_log_metric": "multitask_binary_accuracy",
},
"py150": {
"split_scheme": "official",
"model": "code-gpt-py",
"loss_function": "lm_cross_entropy",
"val_metric": "acc",
"val_metric_decreasing": False,
"optimizer": "AdamW",
"optimizer_kwargs": {"eps": 1e-8},
"lr": 8e-5,
"weight_decay": 0.0,
"n_epochs": 3,
"batch_size": 6,
"groupby_fields": [
"repo",
],
"n_groups_per_batch": 2,
"irm_lambda": 1.0,
"coral_penalty_weight": 1.0,
"no_group_logging": True,
"algo_log_metric": "multitask_accuracy",
"process_outputs_function": "multiclass_logits_to_pred",
},
"poverty": {
"split_scheme": "official",
"dataset_kwargs": {
"no_nl": False,
"fold": "A",
"oracle_training_set": False,
"use_ood_val": True,
},
"model": "resnet18_ms",
"model_kwargs": {"num_channels": 8},
"train_transform": "poverty_train",
"eval_transform": None,
"loss_function": "mse",
"groupby_fields": [
"country",
],
"val_metric": "r_wg",
"val_metric_decreasing": False,
"algo_log_metric": "mse",
"optimizer": "Adam",
"scheduler": "StepLR",
"scheduler_kwargs": {"gamma": 0.96},
"batch_size": 64,
"lr": 0.001,
"weight_decay": 0.0,
"n_epochs": 200,
"n_groups_per_batch": 8,
"irm_lambda": 1.0,
"coral_penalty_weight": 0.1,
"process_outputs_function": None,
},
"waterbirds": {
"split_scheme": "official",
"model": "resnet50",
"train_transform": "image_resize_and_center_crop",
"eval_transform": "image_resize_and_center_crop",
"resize_scale": 256.0 / 224.0,
"model_kwargs": {"pretrained": True},
"loss_function": "cross_entropy",
"groupby_fields": ["background", "y"],
"val_metric": "acc_wg",
"val_metric_decreasing": False,
"algo_log_metric": "accuracy",
"optimizer": "SGD",
"optimizer_kwargs": {"momentum": 0.9},
"scheduler": None,
"batch_size": 128,
"lr": 1e-5,
"weight_decay": 1.0,
"n_epochs": 300,
"process_outputs_function": "multiclass_logits_to_pred",
},
"yelp": {
"split_scheme": "official",
"model": "bert-base-uncased",
"train_transform": "bert",
"eval_transform": "bert",
"max_token_length": 512,
"loss_function": "cross_entropy",
"algo_log_metric": "accuracy",
"batch_size": 8,
"lr": 2e-6,
"weight_decay": 0.01,
"n_epochs": 3,
"n_groups_per_batch": 2,
"process_outputs_function": "multiclass_logits_to_pred",
},
"sqf": {
"split_scheme": "all_race",
"model": "logistic_regression",
"train_transform": None,
"eval_transform": None,
"model_kwargs": {"in_features": 104},
"loss_function": "cross_entropy",
"groupby_fields": ["y"],
"val_metric": "precision_at_global_recall_all",
"val_metric_decreasing": False,
"algo_log_metric": "accuracy",
"optimizer": "Adam",
"optimizer_kwargs": {},
"scheduler": None,
"batch_size": 4,
"lr": 5e-5,
"weight_decay": 0,
"n_epochs": 4,
"process_outputs_function": None,
},
}
##########################################
# Split-specific defaults for Amazon #
##########################################
amazon_split_defaults = {
"official": {
"groupby_fields": ["user"],
"val_metric": "10th_percentile_acc",
"val_metric_decreasing": False,
"no_group_logging": True,
},
"user": {
"groupby_fields": ["user"],
"val_metric": "10th_percentile_acc",
"val_metric_decreasing": False,
"no_group_logging": True,
},
"time": {
"groupby_fields": ["year"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
},
"time_baseline": {
"groupby_fields": ["year"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
},
}
user_baseline_splits = [
"A1CNQTCRQ35IMM_baseline",
"A1NE43T0OM6NNX_baseline",
"A1UH21GLZTYYR5_baseline",
"A20EEWWSFMZ1PN_baseline",
"A219Y76LD1VP4N_baseline",
"A37BRR2L8PX3R2_baseline",
"A3JVZY05VLMYEM_baseline",
"A9Q28YTLYREO7_baseline",
"ASVY5XSYJ1XOE_baseline",
"AV6QDP8Q0ONK4_baseline",
]
for split in user_baseline_splits:
amazon_split_defaults[split] = {
"groupby_fields": ["user"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
}
category_splits = [
"arts_crafts_and_sewing_generalization",
"automotive_generalization",
"books,movies_and_tv,home_and_kitchen,electronics_generalization",
"books_generalization",
"category_subpopulation",
"cds_and_vinyl_generalization",
"cell_phones_and_accessories_generalization",
"clothing_shoes_and_jewelry_generalization",
"digital_music_generalization",
"electronics_generalization",
"grocery_and_gourmet_food_generalization",
"home_and_kitchen_generalization",
"industrial_and_scientific_generalization",
"kindle_store_generalization",
"luxury_beauty_generalization",
"movies_and_tv,books,home_and_kitchen_generalization",
"movies_and_tv,books_generalization",
"movies_and_tv_generalization",
"musical_instruments_generalization",
"office_products_generalization",
"patio_lawn_and_garden_generalization",
"pet_supplies_generalization",
"prime_pantry_generalization",
"sports_and_outdoors_generalization",
"tools_and_home_improvement_generalization",
"toys_and_games_generalization",
"video_games_generalization",
]
for split in category_splits:
amazon_split_defaults[split] = {
"groupby_fields": ["category"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
}
########################################
# Split-specific defaults for Yelp #
########################################
yelp_split_defaults = {
"official": {
"groupby_fields": ["year"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
},
"user": {
"groupby_fields": ["user"],
"val_metric": "10th_percentile_acc",
"val_metric_decreasing": False,
"no_group_logging": True,
},
"time": {
"groupby_fields": ["year"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
},
"time_baseline": {
"groupby_fields": ["year"],
"val_metric": "acc_avg",
"val_metric_decreasing": False,
},
}
###############################
# Split-specific defaults #
###############################
split_defaults = {
"amazon": amazon_split_defaults,
"yelp": yelp_split_defaults,
}
loader_defaults = {
"loader_kwargs": {
"num_workers": 4,
"pin_memory": True,
},
"n_groups_per_batch": 4,
}
def populate_defaults(config):
"""Populates hyperparameters with defaults implied by choices of other
hyperparameters."""
assert config.dataset is not None, "dataset must be specified"
# implied defaults from choice of dataset
config = populate_config(config, dataset_defaults[config.dataset])
# implied defaults from choice of split
if (
config.dataset in split_defaults
and config.split_scheme in split_defaults[config.dataset]
):
config = populate_config(
config, split_defaults[config.dataset][config.split_scheme]
)
# implied defaults from choice of loader
config = populate_config(config, loader_defaults)
# basic checks
required_fields = [
"split_scheme",
]
for field in required_fields:
assert (
getattr(config, field) is not None
), f"Must manually specify {field} for this setup."
return config
def populate_config(config, template: dict, force_compatibility=False):
"""Populates missing (key, val) pairs in config with (key, val) in template.
Example usage: populate config with defaults
Args:
- config: namespace
- template: dict
- force_compatibility: option to raise errors if config.key != template[key]
"""
if template is None:
return config
d_config = vars(config)
for key, val in template.items():
if not isinstance(val, dict): # config[key] expected to be a non-index-able
if key not in d_config or d_config[key] is None:
d_config[key] = val
elif d_config[key] != val and force_compatibility:
raise ValueError(f"Argument {key} must be set to {val}")
else: # config[key] expected to be a kwarg dict
if key not in d_config:
d_config[key] = {}
for kwargs_key, kwargs_val in val.items():
if kwargs_key not in d_config[key] or d_config[key][kwargs_key] is None:
d_config[key][kwargs_key] = kwargs_val
elif d_config[key][kwargs_key] != kwargs_val and force_compatibility:
raise ValueError(
f"Argument {key}[{kwargs_key}] must be set to {val}"
)
return config
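

# Minimal, self-contained illustration of the merging rule implemented by
# `populate_config` above: values already set on the config win, missing or
# None values are filled from the template, and kwarg dicts are merged key by
# key. The config fields below are invented for the example.
if __name__ == "__main__":
    from argparse import Namespace

    _cfg = Namespace(lr=None, batch_size=16, optimizer_kwargs={"momentum": 0.9})
    populate_config(
        _cfg,
        {"lr": 1e-3, "batch_size": 32, "optimizer_kwargs": {"momentum": 0.8, "nesterov": True}},
    )
    # lr is filled to 0.001, batch_size stays 16, momentum stays 0.9, nesterov is added.
    print(_cfg)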
|
meerkat-main
|
meerkat/datasets/wilds/config.py
|
"""WILDS integration for Meerkat."""
from __future__ import annotations
from argparse import Namespace
from typing import TYPE_CHECKING, List
import numpy as np
import pandas as pd
from meerkat.columns.abstract import Column
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.dataframe import DataFrame
from meerkat.tools.lazy_loader import LazyLoader
from .config import base_config, populate_defaults
from .transforms import initialize_transform
torch_collate = LazyLoader("torch.utils.data._utils.collate")
if TYPE_CHECKING:
from datasets import DatasetInfo
try:
import wilds
from wilds.datasets.wilds_dataset import WILDSSubset
_wilds_available = True
except ImportError:
_wilds_available = False
def get_wilds_dataframe(
dataset_name: str,
root_dir: str,
version: str = None,
column_names: List[str] = None,
info: DatasetInfo = None,
split: str = None,
use_transform: bool = True,
include_raw_input: bool = True,
):
"""Get a DataFrame that holds a WildsInputColumn alongside NumpyColumns for
targets and metadata.
Example:
Run inference on the dataset and store predictions alongside the data.
.. code-block:: python
df = get_wilds_dataframe("fmow", root_dir="/datasets/", split="test")
model = ... # get the model
model.to(0).eval()
@torch.no_grad()
def predict(batch: dict):
out = torch.softmax(model(batch["input"].to(0)), axis=-1)
return {"pred": out.cpu().numpy().argmax(axis=-1)}
df = df.update(function=predict, batch_size=128, is_batched_fn=True)
Args:
dataset_name (str, optional): dataset name. Defaults to `"fmow"`.
version (str, optional): dataset version number, e.g., '1.0'.
Defaults to the latest version.
root_dir (str): the directory where the WILDS dataset is downloaded.
See https://wilds.stanford.edu/ for download instructions.
        split (str, optional): the dataset split to load (e.g. "train", "test").
            Defaults to None, in which case the full dataset is used.
        use_transform (bool, optional): Whether to apply the transform from the
            WILDS example directory on load. Defaults to True.
        column_names (List[str], optional): passed through to the ``DataFrame``
            constructor. Defaults to None.
        info (DatasetInfo, optional): passed through to the ``DataFrame``
            constructor. Defaults to None.
        include_raw_input (bool, optional): include a column for the input without
            the transform applied – useful for visualizing images. Defaults to True.
"""
if not _wilds_available:
raise ImportError(
"The WILDS package is not installed. To use the RG WILDS module, please "
"install by following instructions at "
"https://wilds.stanford.edu/get_started/"
)
input_column = WILDSInputColumn(
dataset_name=dataset_name,
version=version,
root_dir=root_dir,
split=split,
use_transform=use_transform,
)
output_column = input_column.get_y_column()
metadata_columns = input_column.get_metadata_columns()
data = {"input": input_column, "y": output_column, **metadata_columns}
if include_raw_input:
data["raw_input"] = input_column.copy()
data["raw_input"].use_transform = False
data["raw_input"]._data.transform = lambda x: x
data["raw_input"]._collate_fn = lambda x: x
return DataFrame(
data,
column_names=column_names,
info=info,
split=split,
)
class WILDSInputColumn(Column):
def __init__(
self,
dataset_name: str = "fmow",
version: str = None,
root_dir: str = None,
split: str = None,
use_transform: bool = True,
**kwargs,
):
"""A column wrapper around a WILDS dataset that can lazily load the
inputs for each dataset.
Args:
dataset_name (str, optional): dataset name. Defaults to `"fmow"`.
version (str, optional): dataset version number, e.g., '1.0'.
Defaults to the latest version.
            root_dir (str, optional): the directory where the WILDS dataset is
                downloaded. Defaults to None.
            split (str, optional): the dataset split to load. Defaults to None.
use_transform (bool, optional): Whether to apply the transform from the
WILDS example directory on load. Defaults to True.
"""
self.dataset_name = dataset_name
self.version = version
self.root_dir = root_dir
self.split = split
self.use_transform = use_transform
dataset = wilds.get_dataset(
dataset=dataset_name, version=version, root_dir=root_dir
)
self.root = dataset.root
self.split = split
self.metadata_columns = {}
# get additional, dataset-specific metadata columns
if dataset_name == "fmow":
metadata_df = dataset.metadata
metadata_df = metadata_df[metadata_df.split != "seq"]
if self.split is not None:
metadata_df = metadata_df[
dataset.split_array == dataset.split_dict[self.split]
]
self.metadata_columns.update(
{
field: NumPyTensorColumn(data=series.values)
for field, series in metadata_df.items()
}
)
if use_transform:
# we need a WILDS config in order to initialize transform
config = Namespace(dataset=dataset_name, **base_config)
config = populate_defaults(config)
transform_name = (
config.train_transform if split == "train" else config.eval_transform
)
transform = initialize_transform(
transform_name, config=config, dataset=dataset
)
# wilds defaults to torch torch_collate.`default_collate`
collate = (
torch_collate.default_collate
if dataset.collate is None
else dataset.collate
)
else:
transform = collate = None
if split is not None:
dataset = dataset.get_subset(split, transform=transform, frac=1.0)
elif transform is not None:
# only WILDSSubset supports applying a transform, so we use it even if no
# split is applied
dataset = WILDSSubset(dataset, np.arange(len(dataset)), transform=transform)
self.metadata_columns.update(
{
f"meta_{field}": NumPyTensorColumn(data=dataset.metadata_array[:, idx])
for idx, field in enumerate(dataset.metadata_fields)
}
)
super(WILDSInputColumn, self).__init__(data=dataset, collate_fn=collate)
def get_y_column(self):
"""Get a NumpyArrayColumn holding the targets for the dataset.
Warning: `WildsDataset`s may remap indexes in arbitrary ways so it's important
not to directly try to access the underlying data structures, instead relying on
the `y_array` and `metadata_array` properties which are universal across WILDS
datasets.
"""
return NumPyTensorColumn(data=self.data.y_array)
def get_metadata_columns(self):
return self.metadata_columns
def _get_cell(self, index: int, materialize: bool = True):
# only get input (not y and meta)
return self.data[index][0]
def _repr_pandas_(
self,
) -> pd.Series:
series = pd.Series(np.arange(len(self._data)))
return series.apply(
lambda x: f"WildsInput(path={self.root}/images/rgb_img_{x}.png)"
)
@classmethod
def _state_keys(cls) -> set:
"""List of attributes that describe the state of the object.
        Warning: writing this column is very lightweight; only the name of the
        dataset (`dataset_name`), the dataset directory (`root_dir`), and the other
        args to `__init__` are written to disk. If the data at `root_dir` is
        modified, `read` will return a column with different data.
"""
return {
"dataset_name",
"version",
"root_dir",
"split",
"use_transform",
"_visible_rows",
}
|
meerkat-main
|
meerkat/datasets/wilds/__init__.py
|
import json
import os
from typing import List
import pandas as pd
import meerkat as mk
IMAGES_URLS = {
"train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.tar.gz",
"val": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.tar.gz",
"test": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/public_test.tar.gz", # noqa: E501
}
INFO_URLS = {
"train": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/train.json.tar.gz", # noqa: E501
"val": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/val.json.tar.gz",
"test": "https://ml-inat-competition-datasets.s3.amazonaws.com/2021/public_test.json.tar.gz", # noqa: E501
}
def build_inaturalist_df(
dataset_dir: str, download: bool = True, splits: List[str] = None
) -> mk.DataFrame:
"""Build a DataFrame from the inaturalist dataset.
Args:
dataset_dir: The directory to store the dataset in.
download: Whether to download the dataset if it does not yet exist.
splits: A list of splits to include. Defaults to all splits.
"""
from torchvision.datasets.utils import download_and_extract_archive
if splits is None:
splits = ["train", "test", "val"]
dfs = []
for split in splits:
if not os.path.exists(os.path.join(dataset_dir, split)) and download:
download_and_extract_archive(
IMAGES_URLS[split], download_root=dataset_dir, remove_finished=True
)
if not os.path.exists(os.path.join(dataset_dir, f"{split}.json")) and download:
download_and_extract_archive(
INFO_URLS[split], download_root=dataset_dir, remove_finished=True
)
with open(os.path.join(dataset_dir, f"{split}.json"), "r") as f:
info = json.load(f)
df = mk.DataFrame(info["images"])
# need to rename "id" so there aren't conflicts with the "id" in other
# dataframes (see annotations below)
df["image_id"] = df["id"]
df.remove_column("id")
# add image column
df["image"] = mk.ImageColumn(df["file_name"], base_dir=dataset_dir)
df["date"] = pd.to_datetime(df["date"])
# add annotations for each image
if split != "test": # no annotations for test set
annotation_df = mk.DataFrame(info["annotations"])
annotation_df["annotation_id"] = annotation_df["id"]
annotation_df.remove_column("id")
df = df.merge(annotation_df, on="image_id")
# join on the category table to get the category name
category_df = mk.DataFrame(info["categories"])
category_df["category_id"] = category_df["id"]
category_df.remove_column("id")
df = df.merge(category_df, on="category_id")
dfs.append(df)
return mk.concat(dfs) if len(dfs) > 1 else dfs[0]
|
meerkat-main
|
meerkat/datasets/inaturalist/__init__.py
|
import json
import os
from typing import Dict, List, Union
import meerkat as mk
def build_audioset_df(
dataset_dir: str,
splits: List[str] = None,
audio_column: bool = True,
overwrite: bool = False,
) -> Dict[str, mk.DataFrame]:
"""Build DataFrames for the audioset dataset downloaded to ``dataset_dir``.
By default, the resulting DataFrames will be written to ``dataset_dir``
under the filenames "audioset_examples.mk" and "audioset_labels.mk". If
these files already exist and ``overwrite`` is False, the DataFrames will
not be built anew, and instead will be simply loaded from disk.
Args:
        dataset_dir: The directory where the dataset is stored.
        splits: A list of splits to include. Defaults to ["eval_segments"].
Other splits: "balanced_train_segments", "unbalanced_train_segments".
audio_column (bool): Whether to include a :class:`~meerkat.AudioColumn`.
Defaults to True.
overwrite (bool): Whether to overwrite existing DataFrames saved to disk.
Defaults to False.
"""
if splits is None:
splits = ["eval_segments"]
if (
os.path.exists(os.path.join(dataset_dir, "audioset_examples.mk"))
and os.path.exists(os.path.join(dataset_dir, "audioset_labels.mk"))
and not overwrite
):
return {
"examples": mk.DataFrame.read(
os.path.join(dataset_dir, "audioset_examples.mk")
),
"labels": mk.DataFrame.read(
os.path.join(dataset_dir, "audioset_labels.mk")
),
}
dfs = []
label_rows = []
for split in splits:
if not os.path.exists(os.path.join(dataset_dir, f"{split}.csv")):
raise ValueError(f"{split}.csv not found.")
df = mk.DataFrame.from_csv(
os.path.join(dataset_dir, f"{split}.csv"),
names=["YTID", "start_seconds", "end_seconds", "positive_labels"],
skiprows=3,
delimiter=", ",
engine="python", # suppresses warning
)
df["split"] = [split for i in range(len(df))]
df["audio_path"] = df.map(
lambda row: os.path.join(
dataset_dir,
split,
"YTID={}_st={}_et={:.0f}.wav".format(
row["YTID"], row["start_seconds"], row["end_seconds"]
),
),
)
label_rows.extend(
[
{"YTID": row["YTID"], "label_id": label_id}
for row in df[["positive_labels", "YTID"]]
for label_id in row["positive_labels"].strip('"').split(",")
]
)
df.remove_column("positive_labels")
# Filter missing audio
df = df[df["audio_path"].apply(os.path.exists)]
if audio_column:
df["audio"] = mk.AudioColumn(df["audio_path"])
dfs.append(df)
dataset = {
"examples": mk.concat(dfs) if len(dfs) > 1 else dfs[0],
"labels": mk.DataFrame(label_rows),
}
dataset["examples"].write(os.path.join(dataset_dir, "audioset_examples.mk"))
dataset["labels"].write(os.path.join(dataset_dir, "audioset_labels.mk"))
return dataset
def build_ontology_df(dataset_dir: str) -> Dict[str, mk.DataFrame]:
"""Build a DataFrame from the ontology.json file.
Args:
dataset_dir: The directory where the ontology.json file is stored
"""
data = json.load(open(os.path.join(dataset_dir, "ontology.json")))
df = mk.DataFrame.from_dict(data)
relations = [
{"parent_id": row["id"], "child_id": child_id}
for row in df[["id", "child_ids"]]
for child_id in row["child_ids"]
]
df.remove_column("child_ids")
df.remove_column("positive_examples")
df.remove_column("restrictions")
return {"sounds": df, "relations": mk.DataFrame(relations)}
def find_submids(
id: Union[List[str], str],
relations: mk.DataFrame = None,
dataset_dir: str = None,
) -> List[str]:
"""Returns a list of IDs of all subcategories of an audio category.
    Args:
        id: ID or list of IDs for which to find the subcategories.
        relations: The "relations" DataFrame built by ``build_ontology_df``.
        dataset_dir: Alternatively, the directory where the ontology.json file is
            stored can be provided to construct the relations DataFrame.
    """
    if (not relations) == (not dataset_dir):
        raise ValueError("Must pass exactly one of `relations` or `dataset_dir`.")
if dataset_dir is not None:
ontology = build_ontology_df(dataset_dir=dataset_dir)
relations = ontology["relations"]
submids = set()
queue = id if isinstance(id, list) else [id]
while len(queue):
parent_mid = queue[0]
queue.pop(0)
child_ids = relations[relations["parent_id"] == parent_mid]["child_id"]
queue.extend(child_ids)
submids.update(child_ids)
return list(submids)
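

# Toy illustration of the breadth-first traversal performed by `find_submids`
# above, using a plain dict of parent -> children instead of the relations
# DataFrame. The category IDs are invented for the example.
def _submids_example(root, relations):
    found, queue = set(), [root]
    while queue:
        parent = queue.pop(0)
        children = relations.get(parent, [])
        queue.extend(children)
        found.update(children)
    return sorted(found)


print(_submids_example("music", {"music": ["guitar", "drums"], "guitar": ["acoustic", "electric"]}))
# ['acoustic', 'drums', 'electric', 'guitar']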
|
meerkat-main
|
meerkat/datasets/audioset/__init__.py
|